From ea8b62e47bdd1a30248bb68bd4be700c701d5685 Mon Sep 17 00:00:00 2001
From: Greg Steuck
Date: Tue, 25 Oct 2016 12:43:06 -0700
Subject: [PATCH] Haddock (#3)

* Trivial script for regenerating haddocks: exclude .haddock files.

* Haddock regen
---
 docs/haddock/doc-index-47.html | 4 +
 docs/haddock/doc-index-92.html | 4 +
 docs/haddock/doc-index-95.html | 4 +
 docs/haddock/doc-index-A.html | 4 +
 docs/haddock/doc-index-All.html | 4 +
 docs/haddock/doc-index-B.html | 4 +
 docs/haddock/doc-index-C.html | 4 +
 docs/haddock/doc-index-D.html | 4 +
 docs/haddock/doc-index-E.html | 4 +
 docs/haddock/doc-index-F.html | 4 +
 docs/haddock/doc-index-G.html | 4 +
 docs/haddock/doc-index-H.html | 4 +
 docs/haddock/doc-index-I.html | 4 +
 docs/haddock/doc-index-K.html | 4 +
 docs/haddock/doc-index-L.html | 4 +
 docs/haddock/doc-index-M.html | 4 +
 docs/haddock/doc-index-N.html | 4 +
 docs/haddock/doc-index-O.html | 4 +
 docs/haddock/doc-index-P.html | 4 +
 docs/haddock/doc-index-Q.html | 4 +
 docs/haddock/doc-index-R.html | 4 +
 docs/haddock/doc-index-S.html | 4 +
 docs/haddock/doc-index-T.html | 4 +
 docs/haddock/doc-index-U.html | 4 +
 docs/haddock/doc-index-V.html | 4 +
 docs/haddock/doc-index-W.html | 4 +
 docs/haddock/doc-index-Z.html | 4 +
 docs/haddock/doc-index.html | 4 +
 docs/haddock/frames.html | 30 +
 docs/haddock/haddock-util.js | 344 +
 docs/haddock/hslogo-16.png | Bin 0 -> 1684 bytes
 docs/haddock/index-frames.html | 4 +
 docs/haddock/index.html | 4 +
 docs/haddock/minus.gif | Bin 0 -> 56 bytes
 docs/haddock/ocean.css | 600 +
 docs/haddock/plus.gif | Bin 0 -> 59 bytes
 docs/haddock/synopsis.png | Bin 0 -> 11327 bytes
 .../tensorflow-0.1.0.0/TensorFlow-Build.html | 27 +
 .../TensorFlow-BuildOp.html | 6 +
 .../TensorFlow-ControlFlow.html | 9 +
 .../TensorFlow-Internal-FFI.html | 8 +
 .../TensorFlow-Internal-VarInt.html | 4 +
 .../tensorflow-0.1.0.0/TensorFlow-Nodes.html | 6 +
 .../tensorflow-0.1.0.0/TensorFlow-Output.html | 14 +
 .../TensorFlow-Session.html | 23 +
 .../tensorflow-0.1.0.0/TensorFlow-Tensor.html | 17 +
 .../tensorflow-0.1.0.0/TensorFlow-Types.html | 13 +
 .../haddock/tensorflow-0.1.0.0/doc-index.html | 4 +
 docs/haddock/tensorflow-0.1.0.0/frames.html | 30 +
 .../tensorflow-0.1.0.0/haddock-util.js | 344 +
 docs/haddock/tensorflow-0.1.0.0/hslogo-16.png | Bin 0 -> 1684 bytes
 .../tensorflow-0.1.0.0/index-frames.html | 4 +
 docs/haddock/tensorflow-0.1.0.0/index.html | 4 +
 .../mini_TensorFlow-Build.html | 4 +
 .../mini_TensorFlow-BuildOp.html | 4 +
 .../mini_TensorFlow-ControlFlow.html | 4 +
 .../mini_TensorFlow-Internal-FFI.html | 4 +
 .../mini_TensorFlow-Internal-VarInt.html | 4 +
 .../mini_TensorFlow-Nodes.html | 4 +
 .../mini_TensorFlow-Output.html | 4 +
 .../mini_TensorFlow-Session.html | 4 +
 .../mini_TensorFlow-Tensor.html | 4 +
 .../mini_TensorFlow-Types.html | 4 +
 docs/haddock/tensorflow-0.1.0.0/minus.gif | Bin 0 -> 56 bytes
 docs/haddock/tensorflow-0.1.0.0/ocean.css | 600 +
 docs/haddock/tensorflow-0.1.0.0/plus.gif | Bin 0 -> 59 bytes
 .../src/TensorFlow-Build.html | 387 +
 .../src/TensorFlow-BuildOp.html | 210 +
 .../src/TensorFlow-ControlFlow.html | 98 +
 .../src/TensorFlow-Internal-FFI.html | 254 +
 .../src/TensorFlow-Internal-Raw.html | 514 +
 .../src/TensorFlow-Internal-VarInt.html | 61 +
 .../src/TensorFlow-Nodes.html | 152 +
 .../src/TensorFlow-Orphans.html | 57 +
 .../src/TensorFlow-Output.html | 167 +
 .../src/TensorFlow-Session.html | 213 +
 .../src/TensorFlow-Tensor.html | 96 +
 .../src/TensorFlow-Types.html | 393 +
 .../tensorflow-0.1.0.0/src/hscolour.css | 5 +
 docs/haddock/tensorflow-0.1.0.0/synopsis.png | Bin 0 -> 11327 bytes
 .../haddock/tensorflow-0.1.0.0/tensorflow.txt | 626 +
 .../TensorFlow-GenOps-Core.html | 2059 ++
 .../doc-index-95.html | 4 +
 .../doc-index-A.html | 4 +
 .../doc-index-All.html | 4 +
 .../doc-index-B.html | 4 +
 .../doc-index-C.html | 4 +
 .../doc-index-D.html | 4 +
 .../doc-index-E.html | 4 +
 .../doc-index-F.html | 4 +
 .../doc-index-G.html | 4 +
 .../doc-index-H.html | 4 +
 .../doc-index-I.html | 4 +
 .../doc-index-L.html | 4 +
 .../doc-index-M.html | 4 +
 .../doc-index-N.html | 4 +
 .../doc-index-O.html | 4 +
 .../doc-index-P.html | 4 +
 .../doc-index-Q.html | 4 +
 .../doc-index-R.html | 4 +
 .../doc-index-S.html | 4 +
 .../doc-index-T.html | 4 +
 .../doc-index-U.html | 4 +
 .../doc-index-V.html | 4 +
 .../doc-index-W.html | 4 +
 .../doc-index-Z.html | 4 +
 .../doc-index.html | 4 +
 .../tensorflow-core-ops-0.1.0.0/frames.html | 30 +
 .../haddock-util.js | 344 +
 .../tensorflow-core-ops-0.1.0.0/hslogo-16.png | Bin 0 -> 1684 bytes
 .../index-frames.html | 4 +
 .../tensorflow-core-ops-0.1.0.0/index.html | 4 +
 .../mini_TensorFlow-GenOps-Core.html | 4 +
 .../tensorflow-core-ops-0.1.0.0/minus.gif | Bin 0 -> 56 bytes
 .../tensorflow-core-ops-0.1.0.0/ocean.css | 600 +
 .../tensorflow-core-ops-0.1.0.0/plus.gif | Bin 0 -> 59 bytes
 .../src/TensorFlow-GenOps-Core.html | 22618 ++++++++++++++++
 .../src/hscolour.css | 5 +
 .../tensorflow-core-ops-0.1.0.0/synopsis.png | Bin 0 -> 11327 bytes
 .../tensorflow-core-ops.txt | 4576 ++++
 .../TensorFlow-Examples-MNIST-Parse.html | 4 +
 ...ensorFlow-Examples-MNIST-TrainedGraph.html | 4 +
 .../tensorflow-mnist-0.1.0.0/doc-index.html | 4 +
 .../tensorflow-mnist-0.1.0.0/frames.html | 30 +
 .../tensorflow-mnist-0.1.0.0/haddock-util.js | 344 +
 .../tensorflow-mnist-0.1.0.0/hslogo-16.png | Bin 0 -> 1684 bytes
 .../index-frames.html | 4 +
 .../tensorflow-mnist-0.1.0.0/index.html | 4 +
 .../mini_TensorFlow-Examples-MNIST-Parse.html | 4 +
 ...ensorFlow-Examples-MNIST-TrainedGraph.html | 4 +
 .../tensorflow-mnist-0.1.0.0/minus.gif | Bin 0 -> 56 bytes
 .../tensorflow-mnist-0.1.0.0/ocean.css | 600 +
 .../haddock/tensorflow-mnist-0.1.0.0/plus.gif | Bin 0 -> 59 bytes
 .../src/Paths_tensorflow_mnist.html | 46 +
 .../src/TensorFlow-Examples-MNIST-Parse.html | 107 +
 ...ensorFlow-Examples-MNIST-TrainedGraph.html | 41 +
 .../tensorflow-mnist-0.1.0.0/src/hscolour.css | 5 +
 .../tensorflow-mnist-0.1.0.0/synopsis.png | Bin 0 -> 11327 bytes
 .../tensorflow-mnist.txt | 41 +
 .../TensorFlow-Examples-MNIST-InputData.html | 4 +
 .../doc-index.html | 4 +
 .../frames.html | 30 +
 .../haddock-util.js | 344 +
 .../hslogo-16.png | Bin 0 -> 1684 bytes
 .../index-frames.html | 4 +
 .../index.html | 4 +
 ...i_TensorFlow-Examples-MNIST-InputData.html | 4 +
 .../minus.gif | Bin 0 -> 56 bytes
 .../ocean.css | 600 +
 .../plus.gif | Bin 0 -> 59 bytes
 .../Paths_tensorflow_mnist_input_data.html | 46 +
 .../TensorFlow-Examples-MNIST-InputData.html | 42 +
 .../src/hscolour.css | 5 +
 .../synopsis.png | Bin 0 -> 11327 bytes
 .../tensorflow-mnist-input-data.txt | 19 +
 .../TensorFlow-OpGen-AttrVal.html | 6 +
 .../TensorFlow-OpGen.html | 4 +
 .../tensorflow-opgen-0.1.0.0/doc-index.html | 4 +
 .../tensorflow-opgen-0.1.0.0/frames.html | 30 +
 .../tensorflow-opgen-0.1.0.0/haddock-util.js | 344 +
 .../tensorflow-opgen-0.1.0.0/hslogo-16.png | Bin 0 -> 1684 bytes
 .../index-frames.html | 4 +
 .../tensorflow-opgen-0.1.0.0/index.html | 4 +
 .../mini_TensorFlow-OpGen-AttrVal.html | 4 +
 .../mini_TensorFlow-OpGen.html | 4 +
 .../tensorflow-opgen-0.1.0.0/minus.gif | Bin 0 -> 56 bytes
 .../tensorflow-opgen-0.1.0.0/ocean.css | 600 +
 .../haddock/tensorflow-opgen-0.1.0.0/plus.gif | Bin 0 -> 59 bytes
 .../src/TensorFlow-OpGen-AttrVal.html | 131 +
 .../src/TensorFlow-OpGen.html | 468 +
 .../tensorflow-opgen-0.1.0.0/src/hscolour.css | 5 +
 .../tensorflow-opgen-0.1.0.0/synopsis.png | Bin 0 -> 11327 bytes
 .../tensorflow-opgen.txt | 48 +
 .../TensorFlow-EmbeddingOps.html | 16 +
 .../TensorFlow-Gradient.html | 4 +
 .../TensorFlow-Ops.html | 122 +
 .../tensorflow-ops-0.1.0.0/doc-index.html | 4 +
 .../tensorflow-ops-0.1.0.0/frames.html | 30 +
 .../tensorflow-ops-0.1.0.0/haddock-util.js | 344 +
 .../tensorflow-ops-0.1.0.0/hslogo-16.png | Bin 0 -> 1684 bytes
 .../tensorflow-ops-0.1.0.0/index-frames.html | 4 +
 .../haddock/tensorflow-ops-0.1.0.0/index.html | 4 +
 .../mini_TensorFlow-EmbeddingOps.html | 4 +
 .../mini_TensorFlow-Gradient.html | 4 +
 .../mini_TensorFlow-Ops.html | 4 +
 docs/haddock/tensorflow-ops-0.1.0.0/minus.gif | Bin 0 -> 56 bytes
 docs/haddock/tensorflow-ops-0.1.0.0/ocean.css | 600 +
 docs/haddock/tensorflow-ops-0.1.0.0/plus.gif | Bin 0 -> 59 bytes
 .../src/TensorFlow-EmbeddingOps.html | 87 +
 .../src/TensorFlow-Gradient.html | 708 +
 .../src/TensorFlow-Ops.html | 307 +
 .../tensorflow-ops-0.1.0.0/src/hscolour.css | 5 +
 .../tensorflow-ops-0.1.0.0/synopsis.png | Bin 0 -> 11327 bytes
 .../tensorflow-ops-0.1.0.0/tensorflow-ops.txt | 378 +
 ...o-Tensorflow-Core-Framework-AttrValue.html | 4 +
 ...Proto-Tensorflow-Core-Framework-Graph.html | 4 +
 ...oto-Tensorflow-Core-Framework-NodeDef.html | 4 +
 ...Proto-Tensorflow-Core-Framework-OpDef.html | 4 +
 ...sorflow-Core-Framework-ResourceHandle.html | 4 +
 ...roto-Tensorflow-Core-Framework-Tensor.html | 4 +
 ...Tensorflow-Core-Framework-TensorShape.html | 4 +
 ...Proto-Tensorflow-Core-Framework-Types.html | 4 +
 ...Proto-Tensorflow-Core-Protobuf-Config.html | 4 +
 .../doc-index-95.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-A.html | 4 +
 .../doc-index-All.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-B.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-C.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-D.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-E.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-F.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-G.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-H.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-I.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-K.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-L.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-M.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-N.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-O.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-P.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-R.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-S.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-T.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-U.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index-V.html | 4 +
 .../tensorflow-proto-0.1.0.0/doc-index.html | 4 +
 .../tensorflow-proto-0.1.0.0/frames.html | 30 +
 .../tensorflow-proto-0.1.0.0/haddock-util.js | 344 +
 .../tensorflow-proto-0.1.0.0/hslogo-16.png | Bin 0 -> 1684 bytes
 .../index-frames.html | 4 +
 .../tensorflow-proto-0.1.0.0/index.html | 4 +
 ...o-Tensorflow-Core-Framework-AttrValue.html | 4 +
 ...Proto-Tensorflow-Core-Framework-Graph.html | 4 +
 ...oto-Tensorflow-Core-Framework-NodeDef.html | 4 +
 ...Proto-Tensorflow-Core-Framework-OpDef.html | 4 +
 ...sorflow-Core-Framework-ResourceHandle.html | 4 +
 ...roto-Tensorflow-Core-Framework-Tensor.html | 4 +
 ...Tensorflow-Core-Framework-TensorShape.html | 4 +
 ...Proto-Tensorflow-Core-Framework-Types.html | 4 +
 ...Proto-Tensorflow-Core-Protobuf-Config.html | 4 +
 .../tensorflow-proto-0.1.0.0/minus.gif | Bin 0 -> 56 bytes
 .../tensorflow-proto-0.1.0.0/ocean.css | 600 +
 .../haddock/tensorflow-proto-0.1.0.0/plus.gif | Bin 0 -> 59 bytes
 ...-Core-Framework-AllocationDescription.html | 233 +
 ...o-Tensorflow-Core-Framework-AttrValue.html | 762 +
 ...o-Tensorflow-Core-Framework-CostGraph.html | 570 +
 ...to-Tensorflow-Core-Framework-Function.html | 586 +
 ...Proto-Tensorflow-Core-Framework-Graph.html | 198 +
 ...oto-Tensorflow-Core-Framework-NodeDef.html | 257 +
 ...Proto-Tensorflow-Core-Framework-OpDef.html | 854 +
 ...sorflow-Core-Framework-ResourceHandle.html | 182 +
 ...o-Tensorflow-Core-Framework-StepStats.html | 653 +
 ...roto-Tensorflow-Core-Framework-Tensor.html | 448 +
 ...flow-Core-Framework-TensorDescription.html | 187 +
 ...Tensorflow-Core-Framework-TensorShape.html | 166 +
 ...Proto-Tensorflow-Core-Framework-Types.html | 355 +
 ...to-Tensorflow-Core-Framework-Versions.html | 128 +
 ...Proto-Tensorflow-Core-Protobuf-Config.html | 1671 ++
 .../tensorflow-proto-0.1.0.0/src/hscolour.css | 5 +
 .../tensorflow-proto-0.1.0.0/synopsis.png | Bin 0 -> 11327 bytes
 .../tensorflow-proto.txt | 663 +
 .../TensorFlow-Queue.html | 9 +
 .../tensorflow-queue-0.1.0.0/doc-index.html | 4 +
 .../tensorflow-queue-0.1.0.0/frames.html | 30 +
 .../tensorflow-queue-0.1.0.0/haddock-util.js | 344 +
 .../tensorflow-queue-0.1.0.0/hslogo-16.png | Bin 0 -> 1684 bytes
 .../index-frames.html | 4 +
 .../tensorflow-queue-0.1.0.0/index.html | 4 +
 .../mini_TensorFlow-Queue.html | 4 +
 .../tensorflow-queue-0.1.0.0/minus.gif | Bin 0 -> 56 bytes
 .../tensorflow-queue-0.1.0.0/ocean.css | 600 +
 .../haddock/tensorflow-queue-0.1.0.0/plus.gif | Bin 0 -> 59 bytes
 .../src/TensorFlow-Queue.html | 89 +
 .../tensorflow-queue-0.1.0.0/src/hscolour.css | 5 +
 .../tensorflow-queue-0.1.0.0/synopsis.png | Bin 0 -> 11327 bytes
 .../tensorflow-queue.txt | 26 +
 tools/haddock.sh | 3 +-
 277 files changed, 52638 insertions(+), 1 deletion(-)
 create mode 100644 docs/haddock/doc-index-47.html
 create mode 100644 docs/haddock/doc-index-92.html
 create mode 100644 docs/haddock/doc-index-95.html
 create mode 100644 docs/haddock/doc-index-A.html
 create mode 100644 docs/haddock/doc-index-All.html
 create mode 100644 docs/haddock/doc-index-B.html
 create mode 100644 docs/haddock/doc-index-C.html
 create mode 100644 docs/haddock/doc-index-D.html
 create mode 100644 docs/haddock/doc-index-E.html
 create mode 100644 docs/haddock/doc-index-F.html
 create mode 100644 docs/haddock/doc-index-G.html
 create mode 100644 docs/haddock/doc-index-H.html
 create mode 100644 docs/haddock/doc-index-I.html
 create mode 100644 docs/haddock/doc-index-K.html
 create mode 100644 docs/haddock/doc-index-L.html
 create mode 100644 docs/haddock/doc-index-M.html
 create mode 100644 docs/haddock/doc-index-N.html
 create mode 100644 docs/haddock/doc-index-O.html
 create mode 100644 docs/haddock/doc-index-P.html
 create mode 100644 docs/haddock/doc-index-Q.html
 create mode 100644 docs/haddock/doc-index-R.html
 create mode 100644 docs/haddock/doc-index-S.html
 create mode 100644 docs/haddock/doc-index-T.html
 create mode 100644 docs/haddock/doc-index-U.html
 create mode 100644 docs/haddock/doc-index-V.html
 create mode 100644 docs/haddock/doc-index-W.html
 create mode 100644 docs/haddock/doc-index-Z.html
 create mode 100644 docs/haddock/doc-index.html
 create mode 100644 docs/haddock/frames.html
 create mode 100644 docs/haddock/haddock-util.js
 create mode 100644 docs/haddock/hslogo-16.png
 create mode 100644 docs/haddock/index-frames.html
 create mode 100644 docs/haddock/index.html
 create mode 100644 docs/haddock/minus.gif
 create mode 100644 docs/haddock/ocean.css
 create mode 100644 docs/haddock/plus.gif
 create mode 100644 docs/haddock/synopsis.png
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/doc-index.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/frames.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/haddock-util.js
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/hslogo-16.png
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/index-frames.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/index.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-FFI.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-VarInt.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/minus.gif
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/ocean.css
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/plus.gif
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Build.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-BuildOp.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-ControlFlow.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-FFI.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-Raw.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-VarInt.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Nodes.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Orphans.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Output.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Session.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Tensor.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Types.html
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/src/hscolour.css
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/synopsis.png
 create mode 100644 docs/haddock/tensorflow-0.1.0.0/tensorflow.txt
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/frames.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/haddock-util.js
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/hslogo-16.png
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/index-frames.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/index.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/minus.gif
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/ocean.css
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/plus.gif
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow-GenOps-Core.html
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/src/hscolour.css
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/synopsis.png
 create mode 100644 docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/doc-index.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/frames.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/haddock-util.js
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/hslogo-16.png
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/index-frames.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/index.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-Parse.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-TrainedGraph.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/minus.gif
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/ocean.css
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/plus.gif
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-Parse.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-TrainedGraph.html
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/src/hscolour.css
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/synopsis.png
 create mode 100644 docs/haddock/tensorflow-mnist-0.1.0.0/tensorflow-mnist.txt
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/doc-index.html
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/frames.html
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/haddock-util.js
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/hslogo-16.png
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index-frames.html
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index.html
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/mini_TensorFlow-Examples-MNIST-InputData.html
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/minus.gif
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/ocean.css
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/plus.gif
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow-Examples-MNIST-InputData.html
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/hscolour.css
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/synopsis.png
 create mode 100644 docs/haddock/tensorflow-mnist-input-data-0.1.0.0/tensorflow-mnist-input-data.txt
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-AttrVal.html
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/frames.html
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/haddock-util.js
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/hslogo-16.png
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/index-frames.html
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/index.html
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-AttrVal.html
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen.html
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/minus.gif
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/ocean.css
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/plus.gif
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen-AttrVal.html
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen.html
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/src/hscolour.css
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/synopsis.png
 create mode 100644 docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/frames.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/haddock-util.js
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/hslogo-16.png
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/index-frames.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/index.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-EmbeddingOps.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Gradient.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/minus.gif
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/ocean.css
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/plus.gif
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-EmbeddingOps.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Gradient.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Ops.html
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/src/hscolour.css
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/synopsis.png
 create mode 100644 docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/frames.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/haddock-util.js
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/hslogo-16.png
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/index-frames.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/index.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AttrValue.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Graph.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-NodeDef.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-OpDef.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-ResourceHandle.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Tensor.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorShape.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Types.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/minus.gif
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/ocean.css
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/plus.gif
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AllocationDescription.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AttrValue.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-CostGraph.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Function.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Graph.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-NodeDef.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-OpDef.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-ResourceHandle.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-StepStats.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Tensor.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorDescription.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorShape.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Types.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Versions.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Protobuf-Config.html
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/src/hscolour.css
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/synopsis.png
 create mode 100644 docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/frames.html
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/haddock-util.js
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/hslogo-16.png
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/index-frames.html
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/index.html
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/minus.gif
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/ocean.css
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/plus.gif
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/src/TensorFlow-Queue.html
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/src/hscolour.css
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/synopsis.png
 create mode 100644 docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt

diff --git a/docs/haddock/doc-index-47.html b/docs/haddock/doc-index-47.html
new file mode 100644
index 0000000..2de9f3b
--- /dev/null
+++ b/docs/haddock/doc-index-47.html
@@ -0,0 +1,4 @@
+ (Index - /)
Index - /
/= TensorFlow.Types
\ No newline at end of file
diff --git a/docs/haddock/doc-index-92.html b/docs/haddock/doc-index-92.html
new file mode 100644
index 0000000..d5185e5
--- /dev/null
+++ b/docs/haddock/doc-index-92.html
@@ -0,0 +1,4 @@
+ (Index - \)
Index - \
\\ TensorFlow.Types
\ No newline at end of file
diff --git a/docs/haddock/doc-index-95.html b/docs/haddock/doc-index-95.html
new file mode 100644
index 0000000..7a1c25d
--- /dev/null
+++ b/docs/haddock/doc-index-95.html
@@ -0,0 +1,4 @@
+ (Index - _)
Index - _
_Arg TensorFlow.GenOps.Core
_AttrValue'b Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'f Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'func Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'i Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'list Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'b Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'f Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'i Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue's Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'shape Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'tensor Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'type' Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'placeholder Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue's Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'shape Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'tensor Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'type' Proto.Tensorflow.Core.Framework.AttrValue
_ConfigProto'allowSoftPlacement Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceCount Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'key Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'value Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceFilters Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'gpuOptions Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'graphOptions Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'interOpParallelismThreads Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'intraOpParallelismThreads Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'logDevicePlacement Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'operationTimeoutInMs Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'placementPeriod Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'sessionInterOpThreadPool Proto.Tensorflow.Core.Protobuf.Config
_ConfigProto'usePerSessionThreads Proto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'debugOps Proto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'debugUrls Proto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'nodeName Proto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'outputSlot Proto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allocatorType Proto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allowGrowth Proto.Tensorflow.Core.Protobuf.Config
_GPUOptions'deferredDeletionBytes Proto.Tensorflow.Core.Protobuf.Config
_GPUOptions'perProcessGpuMemoryFraction Proto.Tensorflow.Core.Protobuf.Config
_GPUOptions'visibleDeviceList Proto.Tensorflow.Core.Protobuf.Config
_GraphDef'library Proto.Tensorflow.Core.Framework.Graph
_GraphDef'node Proto.Tensorflow.Core.Framework.Graph
_GraphDef'version Proto.Tensorflow.Core.Framework.Graph
_GraphDef'versions Proto.Tensorflow.Core.Framework.Graph
_GraphOptions'buildCostModel Proto.Tensorflow.Core.Protobuf.Config
_GraphOptions'buildCostModelAfter Proto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableBfloat16Sendrecv Proto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableRecvScheduling Proto.Tensorflow.Core.Protobuf.Config
_GraphOptions'inferShapes Proto.Tensorflow.Core.Protobuf.Config
_GraphOptions'optimizerOptions Proto.Tensorflow.Core.Protobuf.Config
_GraphOptions'placePrunedGraph Proto.Tensorflow.Core.Protobuf.Config
_GraphOptions'timelineStep Proto.Tensorflow.Core.Protobuf.Config
_HostCast TensorFlow.GenOps.Core
_HostRecv TensorFlow.GenOps.Core
_HostSend TensorFlow.GenOps.Core
_NameAttrList'attr Proto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'key Proto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'value Proto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'name Proto.Tensorflow.Core.Framework.AttrValue
_NodeDef'attr Proto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'key Proto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'value Proto.Tensorflow.Core.Framework.NodeDef
_NodeDef'device Proto.Tensorflow.Core.Framework.NodeDef
_NodeDef'input Proto.Tensorflow.Core.Framework.NodeDef
_NodeDef'name Proto.Tensorflow.Core.Framework.NodeDef
_NodeDef'op Proto.Tensorflow.Core.Framework.NodeDef
_opAttrs TensorFlow.Output
_opControlInputs TensorFlow.Output
_OpDef'allowsUninitializedInput Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'description Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'isRef Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'name Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'numberAttr Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'type' Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeAttr Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeListAttr Proto.Tensorflow.Core.Framework.OpDef
_OpDef'attr Proto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'allowedValues Proto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'defaultValue Proto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'description Proto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'hasMinimum Proto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'minimum Proto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'name Proto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'type' Proto.Tensorflow.Core.Framework.OpDef
_OpDef'deprecation Proto.Tensorflow.Core.Framework.OpDef
_OpDef'description Proto.Tensorflow.Core.Framework.OpDef
_OpDef'inputArg Proto.Tensorflow.Core.Framework.OpDef
_OpDef'isAggregate Proto.Tensorflow.Core.Framework.OpDef
_OpDef'isCommutative Proto.Tensorflow.Core.Framework.OpDef
_OpDef'isStateful Proto.Tensorflow.Core.Framework.OpDef
_OpDef'name Proto.Tensorflow.Core.Framework.OpDef
_OpDef'outputArg Proto.Tensorflow.Core.Framework.OpDef
_OpDef'summary Proto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'explanation Proto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'version Proto.Tensorflow.Core.Framework.OpDef
_opInputs TensorFlow.Output
_OpList'op Proto.Tensorflow.Core.Framework.OpDef
_opName TensorFlow.Output
_OptimizerOptions'doCommonSubexpressionElimination Proto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doConstantFolding Proto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doFunctionInlining Proto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'optLevel Proto.Tensorflow.Core.Protobuf.Config
_opType TensorFlow.Output
_Recv TensorFlow.GenOps.Core
_ResourceHandle'container Proto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'device Proto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'hashCode Proto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'maybeTypeName Proto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'name Proto.Tensorflow.Core.Framework.ResourceHandle
_Retval TensorFlow.GenOps.Core
_RunMetadata'costGraph Proto.Tensorflow.Core.Protobuf.Config
_RunMetadata'partitionGraphs Proto.Tensorflow.Core.Protobuf.Config
_RunMetadata'stepStats Proto.Tensorflow.Core.Protobuf.Config
_RunOptions'debugTensorWatchOpts Proto.Tensorflow.Core.Protobuf.Config
_RunOptions'interOpThreadPool Proto.Tensorflow.Core.Protobuf.Config
_RunOptions'outputPartitionGraphs Proto.Tensorflow.Core.Protobuf.Config
_RunOptions'timeoutInMs Proto.Tensorflow.Core.Protobuf.Config
_RunOptions'traceLevel Proto.Tensorflow.Core.Protobuf.Config
_Send TensorFlow.GenOps.Core
_TensorProto'boolVal Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'dcomplexVal Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'doubleVal Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'dtype Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'floatVal Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'halfVal Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'int64Val Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'intVal Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'resourceHandleVal Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'scomplexVal Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'stringVal Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorContent Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorShape Proto.Tensorflow.Core.Framework.Tensor
_TensorProto'versionNumber Proto.Tensorflow.Core.Framework.Tensor
_TensorShapeProto'dim Proto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'name Proto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'size Proto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'unknownRank Proto.Tensorflow.Core.Framework.TensorShape
_ThreadPoolOptionProto'numThreads Proto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file
diff --git a/docs/haddock/doc-index-A.html b/docs/haddock/doc-index-A.html
new file mode 100644
index 0000000..a0546b7
--- /dev/null
+++ b/docs/haddock/doc-index-A.html
@@ -0,0 +1,4 @@
+ (Index - A)
Index - A
abort TensorFlow.GenOps.Core
abs
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
acos TensorFlow.GenOps.Core
add
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
addGraphDef TensorFlow.Build, TensorFlow.Session
addInitializer TensorFlow.Build
addN
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
addNewOp TensorFlow.Build
addSummary TensorFlow.Build
adjustContrast TensorFlow.GenOps.Core
adjustContrastv2 TensorFlow.GenOps.Core
all TensorFlow.GenOps.Core
allCandidateSampler TensorFlow.GenOps.Core
allocatorType Proto.Tensorflow.Core.Protobuf.Config
allowedValues Proto.Tensorflow.Core.Framework.OpDef
allowGrowth Proto.Tensorflow.Core.Protobuf.Config
allowSoftPlacement Proto.Tensorflow.Core.Protobuf.Config
allowsUninitializedInput Proto.Tensorflow.Core.Framework.OpDef
AllTensorTypes TensorFlow.Types
any TensorFlow.GenOps.Core
applyAdadelta TensorFlow.GenOps.Core
applyAdagrad TensorFlow.GenOps.Core
applyAdagradDA TensorFlow.GenOps.Core
applyAdam TensorFlow.GenOps.Core
applyFtrl TensorFlow.GenOps.Core
applyGradientDescent TensorFlow.GenOps.Core
applyMomentum TensorFlow.GenOps.Core
applyProximalAdagrad TensorFlow.GenOps.Core
applyProximalGradientDescent TensorFlow.GenOps.Core
applyRMSProp TensorFlow.GenOps.Core
argMax
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
argMin TensorFlow.GenOps.Core
asGraphDef TensorFlow.Build
asin TensorFlow.GenOps.Core
assign
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
assignAdd TensorFlow.GenOps.Core
assignSub TensorFlow.GenOps.Core
asString TensorFlow.GenOps.Core
asyncProdNodes TensorFlow.Session
atan TensorFlow.GenOps.Core
attr
1 (Function) Proto.Tensorflow.Core.Framework.OpDef
2 (Function) Proto.Tensorflow.Core.Framework.NodeDef
3 (Function) Proto.Tensorflow.Core.Framework.AttrValue
AttrBool TensorFlow.OpGen.AttrVal
AttrBytes TensorFlow.OpGen.AttrVal
AttrCase TensorFlow.OpGen.AttrVal
AttrDef TensorFlow.OpGen.AttrVal
attrDef TensorFlow.OpGen.AttrVal
AttrFloat TensorFlow.OpGen.AttrVal
Attribute TensorFlow.Types
AttrInt64 TensorFlow.OpGen.AttrVal
attrLens TensorFlow.Types
AttrList TensorFlow.OpGen.AttrVal
attrOriginal TensorFlow.OpGen.AttrVal
AttrShape TensorFlow.OpGen.AttrVal
AttrSingle TensorFlow.OpGen.AttrVal
AttrTemplate TensorFlow.OpGen.AttrVal
attrTemplate TensorFlow.OpGen.AttrVal
AttrTensor TensorFlow.OpGen.AttrVal
AttrType TensorFlow.OpGen.AttrVal
AttrValue
1 (Data Constructor) Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class) Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListValue
1 (Data Constructor) Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class) Proto.Tensorflow.Core.Framework.AttrValue
audioSummary TensorFlow.GenOps.Core
avgPool TensorFlow.GenOps.Core
avgPool3D TensorFlow.GenOps.Core
avgPool3DGrad TensorFlow.GenOps.Core
avgPoolGrad TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-All.html b/docs/haddock/doc-index-All.html
new file mode 100644
index 0000000..1550959
--- /dev/null
+++ b/docs/haddock/doc-index-All.html
@@ -0,0 +1,4 @@
+ (Index)
Index
/= TensorFlow.Types
abort TensorFlow.GenOps.Core
abs
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
acos TensorFlow.GenOps.Core
add
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
addGraphDef TensorFlow.Build, TensorFlow.Session
addInitializer TensorFlow.Build
addN
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
addNewOp TensorFlow.Build
addSummary TensorFlow.Build
adjustContrast TensorFlow.GenOps.Core
adjustContrastv2 TensorFlow.GenOps.Core
all TensorFlow.GenOps.Core
allCandidateSampler TensorFlow.GenOps.Core
allocatorType Proto.Tensorflow.Core.Protobuf.Config
allowedValues Proto.Tensorflow.Core.Framework.OpDef
allowGrowth Proto.Tensorflow.Core.Protobuf.Config
allowSoftPlacement Proto.Tensorflow.Core.Protobuf.Config
allowsUninitializedInput Proto.Tensorflow.Core.Framework.OpDef
AllTensorTypes TensorFlow.Types
any TensorFlow.GenOps.Core
applyAdadelta TensorFlow.GenOps.Core
applyAdagrad TensorFlow.GenOps.Core
applyAdagradDA TensorFlow.GenOps.Core
applyAdam TensorFlow.GenOps.Core
applyFtrl TensorFlow.GenOps.Core
applyGradientDescent TensorFlow.GenOps.Core
applyMomentum TensorFlow.GenOps.Core
applyProximalAdagrad TensorFlow.GenOps.Core
applyProximalGradientDescent TensorFlow.GenOps.Core
applyRMSProp TensorFlow.GenOps.Core
argMax
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
argMin TensorFlow.GenOps.Core
asGraphDef TensorFlow.Build
asin TensorFlow.GenOps.Core
assign
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
assignAdd TensorFlow.GenOps.Core
assignSub TensorFlow.GenOps.Core
asString TensorFlow.GenOps.Core
asyncProdNodes TensorFlow.Session
atan TensorFlow.GenOps.Core
attr
1 (Function) Proto.Tensorflow.Core.Framework.OpDef
2 (Function) Proto.Tensorflow.Core.Framework.NodeDef
3 (Function) Proto.Tensorflow.Core.Framework.AttrValue
AttrBool TensorFlow.OpGen.AttrVal
AttrBytes TensorFlow.OpGen.AttrVal
AttrCase TensorFlow.OpGen.AttrVal
AttrDef TensorFlow.OpGen.AttrVal
attrDef TensorFlow.OpGen.AttrVal
AttrFloat TensorFlow.OpGen.AttrVal
Attribute TensorFlow.Types
AttrInt64 TensorFlow.OpGen.AttrVal
attrLens TensorFlow.Types
AttrList TensorFlow.OpGen.AttrVal
attrOriginal TensorFlow.OpGen.AttrVal
AttrShape TensorFlow.OpGen.AttrVal
AttrSingle TensorFlow.OpGen.AttrVal
AttrTemplate TensorFlow.OpGen.AttrVal
attrTemplate TensorFlow.OpGen.AttrVal
AttrTensor TensorFlow.OpGen.AttrVal
AttrType TensorFlow.OpGen.AttrVal
AttrValue
1 (Data Constructor) Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class) Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListValue
1 (Data Constructor) Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class) Proto.Tensorflow.Core.Framework.AttrValue
audioSummary TensorFlow.GenOps.Core
avgPool TensorFlow.GenOps.Core
avgPool3D TensorFlow.GenOps.Core
avgPool3DGrad TensorFlow.GenOps.Core
avgPoolGrad TensorFlow.GenOps.Core
b Proto.Tensorflow.Core.Framework.AttrValue
barrier TensorFlow.GenOps.Core
barrierClose TensorFlow.GenOps.Core
barrierIncompleteSize TensorFlow.GenOps.Core
barrierInsertMany TensorFlow.GenOps.Core
barrierReadySize TensorFlow.GenOps.Core
batchCholesky TensorFlow.GenOps.Core
batchCholeskyGrad TensorFlow.GenOps.Core
batchFFT TensorFlow.GenOps.Core
batchFFT2D TensorFlow.GenOps.Core
batchFFT3D TensorFlow.GenOps.Core
batchIFFT TensorFlow.GenOps.Core
batchIFFT2D TensorFlow.GenOps.Core
batchIFFT3D TensorFlow.GenOps.Core
batchMatMul TensorFlow.GenOps.Core
batchMatrixBandPart TensorFlow.GenOps.Core
batchMatrixDeterminant TensorFlow.GenOps.Core
batchMatrixDiag TensorFlow.GenOps.Core
batchMatrixDiagPart TensorFlow.GenOps.Core
batchMatrixInverse TensorFlow.GenOps.Core
batchMatrixSetDiag TensorFlow.GenOps.Core
batchMatrixSolve TensorFlow.GenOps.Core
batchMatrixSolveLs TensorFlow.GenOps.Core
batchMatrixTriangularSolve TensorFlow.GenOps.Core
batchNormWithGlobalNormalization TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGrad TensorFlow.GenOps.Core
batchSelfAdjointEig TensorFlow.GenOps.Core
batchSelfAdjointEigV2 TensorFlow.GenOps.Core
batchSvd TensorFlow.GenOps.Core
batchToSpace TensorFlow.GenOps.Core
batchToSpaceND TensorFlow.GenOps.Core
betainc TensorFlow.GenOps.Core
biasAdd TensorFlow.GenOps.Core
biasAddGrad TensorFlow.GenOps.Core
biasAddV1 TensorFlow.GenOps.Core
biasCkpt TensorFlow.Examples.MNIST.TrainedGraph
bitcast TensorFlow.GenOps.Core
boolVal Proto.Tensorflow.Core.Framework.Tensor
broadcastGradientArgs
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
Build TensorFlow.Build
build TensorFlow.Session
buildAnd TensorFlow.Session
buildCostModel Proto.Tensorflow.Core.Protobuf.Config
buildCostModelAfter Proto.Tensorflow.Core.Protobuf.Config
buildListOp TensorFlow.BuildOp
BuildOp TensorFlow.BuildOp
buildOp TensorFlow.BuildOp
BuildT TensorFlow.Build
buildWithSummary TensorFlow.Session
cast
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
ceil TensorFlow.GenOps.Core
checkEndian TensorFlow.Examples.MNIST.Parse
checkNumerics TensorFlow.GenOps.Core
cholesky TensorFlow.GenOps.Core
choleskyGrad TensorFlow.GenOps.Core
collectAllSummaries TensorFlow.Build
colocateWith TensorFlow.Build
complex TensorFlow.GenOps.Core
complexAbs TensorFlow.GenOps.Core
computeAccidentalHits TensorFlow.GenOps.Core
concat
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
concatOffset TensorFlow.GenOps.Core
ConfigProto
1 (Data Constructor) Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class) Proto.Tensorflow.Core.Protobuf.Config
ConfigProto'DeviceCountEntry
1 (Data Constructor) Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class) Proto.Tensorflow.Core.Protobuf.Config
conj TensorFlow.GenOps.Core
const TensorFlow.GenOps.Core
constant TensorFlow.Ops
container Proto.Tensorflow.Core.Framework.ResourceHandle
ControlNode
1 (Data Constructor) TensorFlow.Output, TensorFlow.Build
2 (Type/Class) TensorFlow.Output, TensorFlow.Build
controlTrigger TensorFlow.GenOps.Core
conv2D TensorFlow.GenOps.Core
conv2DBackpropFilter TensorFlow.GenOps.Core
conv2DBackpropInput TensorFlow.GenOps.Core
conv3D TensorFlow.GenOps.Core
conv3DBackpropFilter TensorFlow.GenOps.Core
conv3DBackpropFilterV2 TensorFlow.GenOps.Core
conv3DBackpropInput TensorFlow.GenOps.Core
conv3DBackpropInputV2 TensorFlow.GenOps.Core
copy TensorFlow.GenOps.Core
copyHost TensorFlow.GenOps.Core
cos TensorFlow.GenOps.Core
costGraph Proto.Tensorflow.Core.Protobuf.Config
countUpTo TensorFlow.GenOps.Core
cropAndResize TensorFlow.GenOps.Core
cropAndResizeGradBoxes TensorFlow.GenOps.Core
cropAndResizeGradImage TensorFlow.GenOps.Core
cross TensorFlow.GenOps.Core
cTCBeamSearchDecoder TensorFlow.GenOps.Core
cTCGreedyDecoder TensorFlow.GenOps.Core
cTCLoss TensorFlow.GenOps.Core
cumprod TensorFlow.GenOps.Core
cumsum TensorFlow.GenOps.Core
DataType Proto.Tensorflow.Core.Framework.Types
dcomplexVal Proto.Tensorflow.Core.Framework.Tensor
debugIdentity TensorFlow.GenOps.Core
debugNanCount TensorFlow.GenOps.Core
debugOps Proto.Tensorflow.Core.Protobuf.Config
DebugTensorWatch
1 (Data Constructor) Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class) Proto.Tensorflow.Core.Protobuf.Config
debugTensorWatchOpts Proto.Tensorflow.Core.Protobuf.Config
debugUrls Proto.Tensorflow.Core.Protobuf.Config
decodeBase64 TensorFlow.GenOps.Core
decodeGif TensorFlow.GenOps.Core
decodeJpeg TensorFlow.GenOps.Core
decodeJSONExample TensorFlow.GenOps.Core
decodePng TensorFlow.GenOps.Core
decodeRaw TensorFlow.GenOps.Core
decodeTensorData TensorFlow.Types
defaultValue Proto.Tensorflow.Core.Framework.OpDef
deferredDeletionBytes Proto.Tensorflow.Core.Protobuf.Config
Delete TensorFlow.Types
deleteSessionTensor TensorFlow.GenOps.Core
deprecation Proto.Tensorflow.Core.Framework.OpDef
depthToSpace TensorFlow.GenOps.Core
depthwiseConv2dNative TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilter TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInput TensorFlow.GenOps.Core
dequeue TensorFlow.Queue
description Proto.Tensorflow.Core.Framework.OpDef
deserializeManySparse TensorFlow.GenOps.Core
destroyTemporaryVariable TensorFlow.GenOps.Core
Device
1 (Data Constructor) TensorFlow.Output
2 (Type/Class) TensorFlow.Output
device
1 (Function) Proto.Tensorflow.Core.Framework.NodeDef
2 (Function) Proto.Tensorflow.Core.Framework.ResourceHandle
deviceCount Proto.Tensorflow.Core.Protobuf.Config
deviceFilters Proto.Tensorflow.Core.Protobuf.Config
deviceName TensorFlow.Output
diag TensorFlow.GenOps.Core
diagPart TensorFlow.GenOps.Core
digamma TensorFlow.GenOps.Core
dilation2D TensorFlow.GenOps.Core
dilation2DBackpropFilter TensorFlow.GenOps.Core
dilation2DBackpropInput TensorFlow.GenOps.Core
dim Proto.Tensorflow.Core.Framework.TensorShape
div TensorFlow.GenOps.Core
doCommonSubexpressionElimination Proto.Tensorflow.Core.Protobuf.Config
doConstantFolding Proto.Tensorflow.Core.Protobuf.Config
docOpList TensorFlow.OpGen
doFunctionInlining Proto.Tensorflow.Core.Protobuf.Config
doubleVal Proto.Tensorflow.Core.Framework.Tensor
drawBoundingBoxes TensorFlow.GenOps.Core
drawMNIST TensorFlow.Examples.MNIST.Parse
dtype Proto.Tensorflow.Core.Framework.Tensor
DT_BFLOAT16 Proto.Tensorflow.Core.Framework.Types
DT_BFLOAT16_REF Proto.Tensorflow.Core.Framework.Types
DT_BOOL Proto.Tensorflow.Core.Framework.Types
DT_BOOL_REF Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128 Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128_REF Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64 Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64_REF Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE_REF Proto.Tensorflow.Core.Framework.Types
DT_FLOAT Proto.Tensorflow.Core.Framework.Types
DT_FLOAT_REF Proto.Tensorflow.Core.Framework.Types
DT_HALF Proto.Tensorflow.Core.Framework.Types
DT_HALF_REF Proto.Tensorflow.Core.Framework.Types
DT_INT16 Proto.Tensorflow.Core.Framework.Types
DT_INT16_REF Proto.Tensorflow.Core.Framework.Types
DT_INT32 Proto.Tensorflow.Core.Framework.Types
DT_INT32_REF Proto.Tensorflow.Core.Framework.Types
DT_INT64 Proto.Tensorflow.Core.Framework.Types
DT_INT64_REF Proto.Tensorflow.Core.Framework.Types
DT_INT8 Proto.Tensorflow.Core.Framework.Types
DT_INT8_REF Proto.Tensorflow.Core.Framework.Types
DT_INVALID Proto.Tensorflow.Core.Framework.Types
DT_QINT16 Proto.Tensorflow.Core.Framework.Types
DT_QINT16_REF Proto.Tensorflow.Core.Framework.Types
DT_QINT32 Proto.Tensorflow.Core.Framework.Types
DT_QINT32_REF Proto.Tensorflow.Core.Framework.Types
DT_QINT8 Proto.Tensorflow.Core.Framework.Types
DT_QINT8_REF Proto.Tensorflow.Core.Framework.Types
DT_QUINT16 Proto.Tensorflow.Core.Framework.Types
DT_QUINT16_REF Proto.Tensorflow.Core.Framework.Types
DT_QUINT8 Proto.Tensorflow.Core.Framework.Types
DT_QUINT8_REF Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE_REF Proto.Tensorflow.Core.Framework.Types
DT_STRING Proto.Tensorflow.Core.Framework.Types
DT_STRING_REF Proto.Tensorflow.Core.Framework.Types
DT_UINT16 Proto.Tensorflow.Core.Framework.Types
DT_UINT16_REF Proto.Tensorflow.Core.Framework.Types
DT_UINT8 Proto.Tensorflow.Core.Framework.Types
DT_UINT8_REF Proto.Tensorflow.Core.Framework.Types
dynamicPartition TensorFlow.GenOps.Core
dynamicStitch TensorFlow.GenOps.Core
editDistance TensorFlow.GenOps.Core
elu TensorFlow.GenOps.Core
eluGrad TensorFlow.GenOps.Core
embeddingLookup TensorFlow.EmbeddingOps
enableBfloat16Sendrecv Proto.Tensorflow.Core.Protobuf.Config
enableRecvScheduling Proto.Tensorflow.Core.Protobuf.Config
encodeBase64 TensorFlow.GenOps.Core
encodeJpeg TensorFlow.GenOps.Core
encodePng TensorFlow.GenOps.Core
encodeTensorData TensorFlow.Types
enqueue TensorFlow.Queue
enter TensorFlow.GenOps.Core
eqLengthGuard TensorFlow.BuildOp
equal TensorFlow.GenOps.Core
erf TensorFlow.GenOps.Core
erfc TensorFlow.GenOps.Core
evalBuildT TensorFlow.Build
ExcludedCase TensorFlow.Types
excludeList TensorFlow.OpGen
exit TensorFlow.GenOps.Core
exp TensorFlow.GenOps.Core
expandDims
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
explanation Proto.Tensorflow.Core.Framework.OpDef
ExplicitName TensorFlow.Output
explicitName TensorFlow.Build
extend TensorFlow.Session
extendGraph TensorFlow.Internal.FFI
extractGlimpse TensorFlow.GenOps.Core
extractImagePatches TensorFlow.GenOps.Core
f Proto.Tensorflow.Core.Framework.AttrValue
fact TensorFlow.GenOps.Core
Feed
1 (Data Constructor) TensorFlow.Tensor
2 (Type/Class) TensorFlow.Tensor
feed TensorFlow.Tensor
Fetch
1 (Data Constructor) TensorFlow.Nodes
2 (Type/Class) TensorFlow.Nodes
Fetchable TensorFlow.Nodes
fetches TensorFlow.Nodes
fetchRestore TensorFlow.Nodes
fetchTensorList TensorFlow.Nodes
fetchTensorVector TensorFlow.Nodes
fFT TensorFlow.GenOps.Core
fFT2D TensorFlow.GenOps.Core
fFT3D TensorFlow.GenOps.Core
fIFOQueue TensorFlow.GenOps.Core
fill
1 (Function) TensorFlow.GenOps.Core
2 (Function) TensorFlow.Ops
fixedLengthRecordReader TensorFlow.GenOps.Core
fixedUnigramCandidateSampler TensorFlow.GenOps.Core
flagParser TensorFlow.OpGen
floatVal Proto.Tensorflow.Core.Framework.Tensor
floor TensorFlow.GenOps.Core
flushInitializers TensorFlow.Build
flushNodeBuffer TensorFlow.Build
fractionalAvgPool TensorFlow.GenOps.Core
fractionalAvgPoolGrad TensorFlow.GenOps.Core
fractionalMaxPool TensorFlow.GenOps.Core
fractionalMaxPoolGrad TensorFlow.GenOps.Core
func Proto.Tensorflow.Core.Framework.AttrValue
fusedResizeAndPadConv2D TensorFlow.GenOps.Core
gather TensorFlow.GenOps.Core
gatherNd TensorFlow.GenOps.Core
getAllOpList TensorFlow.Internal.FFI
getFetch TensorFlow.Nodes
getNodes TensorFlow.Nodes
getOrAddOp TensorFlow.Build
getSessionHandle TensorFlow.GenOps.Core
getSessionTensor TensorFlow.GenOps.Core
getVarInt TensorFlow.Internal.VarInt
GPUOptions
1 (Data Constructor) Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class) Proto.Tensorflow.Core.Protobuf.Config
gpuOptions Proto.Tensorflow.Core.Protobuf.Config
gradients TensorFlow.Gradient
GraphDef
1 (Data Constructor) Proto.Tensorflow.Core.Framework.Graph
2 (Type/Class) Proto.Tensorflow.Core.Framework.Graph
GraphOptions
1 (Data Constructor) Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class) Proto.Tensorflow.Core.Protobuf.Config
graphOptions Proto.Tensorflow.Core.Protobuf.Config
GraphState TensorFlow.Build
greater TensorFlow.GenOps.Core
greaterEqual TensorFlow.GenOps.Core
group TensorFlow.ControlFlow
halfVal Proto.Tensorflow.Core.Framework.Tensor
hashCode Proto.Tensorflow.Core.Framework.ResourceHandle
hasMinimum Proto.Tensorflow.Core.Framework.OpDef
histogramSummary TensorFlow.GenOps.Core
hoistBuildT TensorFlow.Build
hSVToRGB TensorFlow.GenOps.Core
i Proto.Tensorflow.Core.Framework.AttrValue
identity
1 (Function) TensorFlow.ControlFlow
2 (Function) TensorFlow.GenOps.Core
identityReader TensorFlow.GenOps.Core
iFFT TensorFlow.GenOps.Core
iFFT2D TensorFlow.GenOps.Core
iFFT3D TensorFlow.GenOps.Core
igamma TensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imageSummaryTensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
inferShapesProto.Tensorflow.Core.Protobuf.Config
initializedVariableTensorFlow.Ops
initializeTableTensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
inputProto.Tensorflow.Core.Framework.NodeDef
inputArgProto.Tensorflow.Core.Framework.OpDef
int64ValProto.Tensorflow.Core.Framework.Tensor
interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
inTopKTensorFlow.GenOps.Core
intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
intValProto.Tensorflow.Core.Framework.Tensor
invTensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
isAggregateProto.Tensorflow.Core.Framework.OpDef
isCommutativeProto.Tensorflow.Core.Framework.OpDef
isFiniteTensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isRefProto.Tensorflow.Core.Framework.OpDef
isStatefulProto.Tensorflow.Core.Framework.OpDef
isVariableInitializedTensorFlow.GenOps.Core
key 
1 (Function)Proto.Tensorflow.Core.Protobuf.Config
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
l2LossTensorFlow.GenOps.Core
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
lessTensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lgammaTensorFlow.GenOps.Core
libraryProto.Tensorflow.Core.Framework.Graph
linSpaceTensorFlow.GenOps.Core
listProto.Tensorflow.Core.Framework.AttrValue
listDiffTensorFlow.GenOps.Core
logTensorFlow.GenOps.Core
logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
logicalAndTensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logSoftmaxTensorFlow.GenOps.Core
logUniformCandidateSamplerTensorFlow.GenOps.Core
lookupTableExportTensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
lRNTensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
makeQueue2TensorFlow.Queue
matchingFilesTensorFlow.GenOps.Core
matMul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matrixBandPartTensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matTransposeTensorFlow.Ops
maxTensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
maybe'bProto.Tensorflow.Core.Framework.AttrValue
maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
maybe'fProto.Tensorflow.Core.Framework.AttrValue
maybe'funcProto.Tensorflow.Core.Framework.AttrValue
maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'iProto.Tensorflow.Core.Framework.AttrValue
maybe'libraryProto.Tensorflow.Core.Framework.Graph
maybe'listProto.Tensorflow.Core.Framework.AttrValue
maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
maybe'sProto.Tensorflow.Core.Framework.AttrValue
maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
maybe'tensorProto.Tensorflow.Core.Framework.AttrValue
maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
maybe'value 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
maybe'versionsProto.Tensorflow.Core.Framework.Graph
maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
meanTensorFlow.GenOps.Core
mergeTensorFlow.GenOps.Core
mergeSummaryTensorFlow.GenOps.Core
minTensorFlow.GenOps.Core
minimum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
mirrorPadTensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
MNISTTensorFlow.Examples.MNIST.Parse
mnistPbTensorFlow.Examples.MNIST.TrainedGraph
modTensorFlow.GenOps.Core
mul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
multinomialTensorFlow.GenOps.Core
name 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
4 (Function)Proto.Tensorflow.Core.Framework.TensorShape
5 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
NameAttrList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NameAttrList'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
namedTensorFlow.ControlFlow
neg 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
negTrainTensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nodeProto.Tensorflow.Core.Framework.Graph
NodeDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeName 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
nodeNameProto.Tensorflow.Core.Protobuf.Config
NodesTensorFlow.Nodes
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
nonMaxSuppressionTensorFlow.GenOps.Core
noOp 
1 (Function)TensorFlow.ControlFlow
2 (Function)TensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
numberAttrProto.Tensorflow.Core.Framework.OpDef
numThreadsProto.Tensorflow.Core.Protobuf.Config
oneHotTensorFlow.GenOps.Core
OneOfTensorFlow.Types
OpTensorFlow.Output
op 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
opAttrTensorFlow.Output, TensorFlow.Build
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
4 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefTensorFlow.Build
OpDef'ArgDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
OpDef'AttrDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefWithNameTensorFlow.Build
OpDeprecation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
OpGenFlags 
1 (Data Constructor)TensorFlow.OpGen
2 (Type/Class)TensorFlow.OpGen
opInputsTensorFlow.Output, TensorFlow.Build
OpList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opNameTensorFlow.Output, TensorFlow.Build
OpResultTensorFlow.BuildOp
OptimizerOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
optLevelProto.Tensorflow.Core.Protobuf.Config
OpType 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
opUnrenderedTensorFlow.Output
Output 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputTensorFlow.Output
outputArgProto.Tensorflow.Core.Framework.OpDef
outputFileTensorFlow.OpGen
outputIndexTensorFlow.Output
OutputIx 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputOpTensorFlow.Output
outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
outputSlotProto.Tensorflow.Core.Protobuf.Config
pack 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
padTensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
parameterizedTruncatedNormalTensorFlow.GenOps.Core
parseTensorTensorFlow.GenOps.Core
partitionGraphsProto.Tensorflow.Core.Protobuf.Config
PendingNodeNameTensorFlow.Output
perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
placeholder 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
placeholderWithDefaultTensorFlow.GenOps.Core
placementPeriodProto.Tensorflow.Core.Protobuf.Config
placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
polygammaTensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
prefixTensorFlow.OpGen
priorityQueueTensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
protoShapeTensorFlow.Types
putVarIntTensorFlow.Internal.VarInt
quantizeAndDequantizeTensorFlow.GenOps.Core
Queue2TensorFlow.Queue
queueCloseTensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
randomCropTensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
range 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
rankTensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readMessageFromFileOrDieTensorFlow.Examples.MNIST.Parse
readMNISTLabelsTensorFlow.Examples.MNIST.Parse
readMNISTSamplesTensorFlow.Examples.MNIST.Parse
realTensorFlow.GenOps.Core
reducedShapeTensorFlow.Ops
reduceJoinTensorFlow.GenOps.Core
RefTensorFlow.Tensor
refEnterTensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
RefKindTensorFlow.Tensor
refMergeTensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
relu 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu6TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
reluGrad 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
renderTensorFlow.Build
RenderedTensorFlow.Output
renderedNodeDefsTensorFlow.Build
renderNodeNameTensorFlow.Build
renderOutputTensorFlow.Build
reshape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
resizeAreaTensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
ResourceHandle 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
2 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
resourceHandleValProto.Tensorflow.Core.Framework.Tensor
restore 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
restoreFromNameTensorFlow.Ops
restoreSliceTensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rsqrtTensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
run 
1 (Function)TensorFlow.Session
2 (Function)TensorFlow.Internal.FFI
runBuildTTensorFlow.Build
RunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
runSessionTensorFlow.Session
runSessionWithOptionsTensorFlow.Session
runWithFeedsTensorFlow.Session
runWithFeeds_TensorFlow.Session
run_TensorFlow.Session
sProto.Tensorflow.Core.Framework.AttrValue
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
saveTensorFlow.Ops
Scalar 
1 (Data Constructor)TensorFlow.Nodes
2 (Type/Class)TensorFlow.Nodes
scalarTensorFlow.Ops
scalarSummaryTensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scomplexValProto.Tensorflow.Core.Framework.Tensor
segmentMaxTensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
Session 
1 (Type/Class)TensorFlow.Session
2 (Type/Class)TensorFlow.Internal.FFI
sessionConfigTensorFlow.Session
sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
SessionOptionTensorFlow.Session
sessionTargetTensorFlow.Session
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
Shape 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types
shape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
shapeNTensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoidGradTensorFlow.GenOps.Core
sign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sinTensorFlow.GenOps.Core
size 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.TensorShape
sliceTensorFlow.GenOps.Core
softmax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softplusTensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
spaceToBatchTensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseToDense 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
splitTensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stepStatsProto.Tensorflow.Core.Protobuf.Config
stopGradientTensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringValProto.Tensorflow.Core.Framework.Tensor
sub 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
summaryProto.Tensorflow.Core.Framework.OpDef
SummaryTensorTensorFlow.Build
svdTensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
TemplateTensorFlow.OpGen.AttrVal
templateDefaultTensorFlow.OpGen.AttrVal
templateRestrictionsTensorFlow.OpGen.AttrVal
temporaryVariableTensorFlow.GenOps.Core
Tensor 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor
tensorProto.Tensorflow.Core.Framework.AttrValue
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorAttrTensorFlow.Tensor
tensorContentProto.Tensorflow.Core.Framework.Tensor
TensorData 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types
3 (Data Constructor)TensorFlow.Internal.FFI
4 (Type/Class)TensorFlow.Internal.FFI
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Data Constructor)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor
TensorKindTensorFlow.Tensor
tensorKindTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
TensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
2 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
tensorRefTypeTensorFlow.Types
tensorShapeProto.Tensorflow.Core.Framework.Tensor
TensorShapeProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
TensorShapeProto'Dim 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
tensorSummaryTensorFlow.GenOps.Core
TensorTypeTensorFlow.Types
tensorTypeTensorFlow.Types
TensorTypesTensorFlow.Types
tensorValTensorFlow.Types
testImageDataTensorFlow.Examples.MNIST.InputData
testLabelDataTensorFlow.Examples.MNIST.InputData
textLineReaderTensorFlow.GenOps.Core
tFRecordReaderTensorFlow.GenOps.Core
ThreadPoolOptionProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
tileTensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
timelineStepProto.Tensorflow.Core.Protobuf.Config
timeoutInMsProto.Tensorflow.Core.Protobuf.Config
topK 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
topKV2TensorFlow.GenOps.Core
traceLevelProto.Tensorflow.Core.Protobuf.Config
trainingImageDataTensorFlow.Examples.MNIST.InputData
trainingLabelDataTensorFlow.Examples.MNIST.InputData
transpose 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncatedNormal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
type' 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
typeAttrProto.Tensorflow.Core.Framework.OpDef
TypeErrorTensorFlow.Types
typeListAttrProto.Tensorflow.Core.Framework.OpDef
unControlNodeTensorFlow.Output, TensorFlow.Build
uniformCandidateSamplerTensorFlow.GenOps.Core
UniqueTensorFlow.Build
uniqueTensorFlow.GenOps.Core
uniqueWithCountsTensorFlow.GenOps.Core
unknownRankProto.Tensorflow.Core.Framework.TensorShape
unNodeNameTensorFlow.Output
unOpTypeTensorFlow.Output
unOutputIxTensorFlow.Output
unpackTensorFlow.GenOps.Core
UnrenderedTensorFlow.Output
unScalarTensorFlow.Nodes
unsortedSegmentSumTensorFlow.GenOps.Core
unTensorDataTensorFlow.Types
usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
useProtoAsVoidPtrLenTensorFlow.Internal.FFI
ValueTensorFlow.Tensor
value 
1 (Function)TensorFlow.Tensor
2 (Function)Proto.Tensorflow.Core.Protobuf.Config
3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
ValueKindTensorFlow.Tensor
variable 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
vectorTensorFlow.Ops
version 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
versionNumberProto.Tensorflow.Core.Framework.Tensor
versionsProto.Tensorflow.Core.Framework.Graph
visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
where'TensorFlow.GenOps.Core
wholeFileReaderTensorFlow.GenOps.Core
withControlDependenciesTensorFlow.ControlFlow
withDeviceTensorFlow.Build
withNameScopeTensorFlow.Build
withNodeDependenciesTensorFlow.Build
withSessionTensorFlow.Internal.FFI
withStateLensTensorFlow.Build
wtsCkptTensorFlow.Examples.MNIST.TrainedGraph
zeroInitializedVariableTensorFlow.Ops
zerosTensorFlow.Ops
zerosLike 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zetaTensorFlow.GenOps.Core
\\TensorFlow.Types
_ArgTensorFlow.GenOps.Core
_AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'debugOpsProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'debugUrlsProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'nodeNameProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'outputSlotProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
_GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
_GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
_GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
_HostCastTensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
_NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'attrProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
_OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
_OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
_OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
_OpDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
_opInputsTensorFlow.Output
_OpList'opProto.Tensorflow.Core.Framework.OpDef
_opNameTensorFlow.Output
_OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
_opTypeTensorFlow.Output
_RecvTensorFlow.GenOps.Core
_ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
_RetvalTensorFlow.GenOps.Core
_RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
_SendTensorFlow.GenOps.Core
_TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
_TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
_ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
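For orientation, the entries above pair each identifier with its defining module: for example runSession and run live in TensorFlow.Session, constant and matMul in TensorFlow.Ops, and Shape in TensorFlow.Types. A minimal sketch of how those pieces combine, assuming (this index does not state it) that fetching a Tensor Value Float yields a Data.Vector vector:

    import Control.Monad.IO.Class (liftIO)
    import qualified Data.Vector as V
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF
    import qualified TensorFlow.Types as TF

    -- Illustrative sketch only, not taken from these docs: build two
    -- constant 2x2 matrices, multiply them in the graph, and fetch the
    -- product as a flat vector.
    main :: IO ()
    main = TF.runSession $ do
        let a = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
            b = TF.constant (TF.Shape [2, 2]) [5, 6, 7, 8 :: Float]
        result <- TF.run (a `TF.matMul` b)
        liftIO $ print (result :: V.Vector Float)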
\ No newline at end of file
diff --git a/docs/haddock/doc-index-B.html b/docs/haddock/doc-index-B.html
new file mode 100644
index 0000000..417692a
--- /dev/null
+++ b/docs/haddock/doc-index-B.html
@@ -0,0 +1,4 @@
+ (Index - B)
Index - B

bProto.Tensorflow.Core.Framework.AttrValue
barrierTensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasCkptTensorFlow.Examples.MNIST.TrainedGraph
bitcastTensorFlow.GenOps.Core
boolValProto.Tensorflow.Core.Framework.Tensor
broadcastGradientArgs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
BuildTensorFlow.Build
buildTensorFlow.Session
buildAndTensorFlow.Session
buildCostModelProto.Tensorflow.Core.Protobuf.Config
buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
buildListOpTensorFlow.BuildOp
BuildOpTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildTTensorFlow.Build
buildWithSummaryTensorFlow.Session
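The build, buildAnd, and buildWithSummary entries above reflect the split between the Build monad (graph construction) and the Session monad (execution). A hedged sketch of crossing that boundary with a fed placeholder, assuming the signatures suggested by the modules indexed here (placeholder from TensorFlow.Ops, encodeTensorData from TensorFlow.Types, feed from TensorFlow.Tensor, runWithFeeds from TensorFlow.Session; the exact types are assumptions, not taken from this index):

    import qualified Data.Vector as V
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF
    import qualified TensorFlow.Tensor as TF
    import qualified TensorFlow.Types as TF

    -- Sketch only: stage a placeholder in Build, lift it into the running
    -- session with 'build', then feed concrete data when running the graph.
    squareAll :: V.Vector Float -> IO (V.Vector Float)
    squareAll xs = TF.runSession $ do
        let n = fromIntegral (V.length xs)
        x <- TF.build (TF.placeholder (TF.Shape [n]))
        let inputData = TF.encodeTensorData (TF.Shape [n]) xs
        TF.runWithFeeds [TF.feed x inputData] (x `TF.mul` x)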
\ No newline at end of file
diff --git a/docs/haddock/doc-index-C.html b/docs/haddock/doc-index-C.html
new file mode 100644
index 0000000..e970f38
--- /dev/null
+++ b/docs/haddock/doc-index-C.html
@@ -0,0 +1,4 @@
+ (Index - C)
Index - C

cast 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
ceilTensorFlow.GenOps.Core
checkEndianTensorFlow.Examples.MNIST.Parse
checkNumericsTensorFlow.GenOps.Core
choleskyTensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
collectAllSummariesTensorFlow.Build
colocateWithTensorFlow.Build
complexTensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
concat 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concatOffsetTensorFlow.GenOps.Core
ConfigProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
ConfigProto'DeviceCountEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
conjTensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
constantTensorFlow.Ops
containerProto.Tensorflow.Core.Framework.ResourceHandle
ControlNode 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Build
2 (Type/Class)TensorFlow.Output, TensorFlow.Build
controlTriggerTensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
copyTensorFlow.GenOps.Core
copyHostTensorFlow.GenOps.Core
cosTensorFlow.GenOps.Core
costGraphProto.Tensorflow.Core.Protobuf.Config
countUpToTensorFlow.GenOps.Core
cropAndResizeTensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-D.html b/docs/haddock/doc-index-D.html
new file mode 100644
index 0000000..567d5a8
--- /dev/null
+++ b/docs/haddock/doc-index-D.html
@@ -0,0 +1,4 @@
+ (Index - D)
Index - D

DataTypeProto.Tensorflow.Core.Framework.Types
dcomplexValProto.Tensorflow.Core.Framework.Tensor
debugIdentityTensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
debugOpsProto.Tensorflow.Core.Protobuf.Config
DebugTensorWatch 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Config
debugUrlsProto.Tensorflow.Core.Protobuf.Config
decodeBase64TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeTensorDataTensorFlow.Types
defaultValueProto.Tensorflow.Core.Framework.OpDef
\ No newline at end of file
diff --git a/docs/haddock/doc-index-E.html b/docs/haddock/doc-index-E.html
new file mode 100644
index 0000000..2ce0f45
--- /dev/null
+++ b/docs/haddock/doc-index-E.html
@@ -0,0 +1,4 @@
+ (Index - E)
\ No newline at end of file
diff --git a/docs/haddock/doc-index-F.html b/docs/haddock/doc-index-F.html
new file mode 100644
index 0000000..8e297d2
--- /dev/null
+++ b/docs/haddock/doc-index-F.html
@@ -0,0 +1,4 @@
+ (Index - F)
\ No newline at end of file
diff --git a/docs/haddock/doc-index-G.html b/docs/haddock/doc-index-G.html
new file mode 100644
index 0000000..03eeac6
--- /dev/null
+++ b/docs/haddock/doc-index-G.html
@@ -0,0 +1,4 @@
+ (Index - G)
\ No newline at end of file
diff --git a/docs/haddock/doc-index-H.html b/docs/haddock/doc-index-H.html
new file mode 100644
index 0000000..34f5682
--- /dev/null
+++ b/docs/haddock/doc-index-H.html
@@ -0,0 +1,4 @@
+ (Index - H)
\ No newline at end of file
diff --git a/docs/haddock/doc-index-I.html b/docs/haddock/doc-index-I.html
new file mode 100644
index 0000000..81c2dfc
--- /dev/null
+++ b/docs/haddock/doc-index-I.html
@@ -0,0 +1,4 @@
+ (Index - I)
\ No newline at end of file
diff --git a/docs/haddock/doc-index-K.html b/docs/haddock/doc-index-K.html
new file mode 100644
index 0000000..d84e5d6
--- /dev/null
+++ b/docs/haddock/doc-index-K.html
@@ -0,0 +1,4 @@
+ (Index - K)
\ No newline at end of file
diff --git a/docs/haddock/doc-index-L.html b/docs/haddock/doc-index-L.html
new file mode 100644
index 0000000..28d02bc
--- /dev/null
+++ b/docs/haddock/doc-index-L.html
@@ -0,0 +1,4 @@
+ (Index - L)
\ No newline at end of file
diff --git a/docs/haddock/doc-index-M.html b/docs/haddock/doc-index-M.html
new file mode 100644
index 0000000..41df9e3
--- /dev/null
+++ b/docs/haddock/doc-index-M.html
@@ -0,0 +1,4 @@
+ (Index - M)
Index - M

\ No newline at end of file
diff --git a/docs/haddock/doc-index-N.html b/docs/haddock/doc-index-N.html
new file mode 100644
index 0000000..38cdec6
--- /dev/null
+++ b/docs/haddock/doc-index-N.html
@@ -0,0 +1,4 @@
+ (Index - N)
\ No newline at end of file
diff --git a/docs/haddock/doc-index-O.html b/docs/haddock/doc-index-O.html
new file mode 100644
index 0000000..fb6acda
--- /dev/null
+++ b/docs/haddock/doc-index-O.html
@@ -0,0 +1,4 @@
+ (Index - O)
Index - O

\ No newline at end of file
diff --git a/docs/haddock/doc-index-P.html b/docs/haddock/doc-index-P.html
new file mode 100644
index 0000000..f1ecc52
--- /dev/null
+++ b/docs/haddock/doc-index-P.html
@@ -0,0 +1,4 @@
+ (Index - P)
\ No newline at end of file
diff --git a/docs/haddock/doc-index-Q.html b/docs/haddock/doc-index-Q.html
new file mode 100644
index 0000000..b605f6b
--- /dev/null
+++ b/docs/haddock/doc-index-Q.html
@@ -0,0 +1,4 @@
+ (Index - Q)
Index - Q

\ No newline at end of file
diff --git a/docs/haddock/doc-index-R.html b/docs/haddock/doc-index-R.html
new file mode 100644
index 0000000..0d449a0
--- /dev/null
+++ b/docs/haddock/doc-index-R.html
@@ -0,0 +1,4 @@
+ (Index - R)
Index - R

\ No newline at end of file
diff --git a/docs/haddock/doc-index-S.html b/docs/haddock/doc-index-S.html
new file mode 100644
index 0000000..505dbb5
--- /dev/null
+++ b/docs/haddock/doc-index-S.html
@@ -0,0 +1,4 @@
+ (Index - S)
Index - S

\ No newline at end of file
diff --git a/docs/haddock/doc-index-T.html b/docs/haddock/doc-index-T.html
new file mode 100644
index 0000000..2eb505c
--- /dev/null
+++ b/docs/haddock/doc-index-T.html
@@ -0,0 +1,4 @@
+ (Index - T)

 

Index - T

tan: TensorFlow.GenOps.Core
tanh: TensorFlow.GenOps.Core
tanhGrad: TensorFlow.GenOps.Core
Template: TensorFlow.OpGen.AttrVal
templateDefault: TensorFlow.OpGen.AttrVal
templateRestrictions: TensorFlow.OpGen.AttrVal
temporaryVariable: TensorFlow.GenOps.Core
Tensor
  1 (Data Constructor): TensorFlow.Tensor
  2 (Type/Class): TensorFlow.Tensor
tensor: Proto.Tensorflow.Core.Framework.AttrValue
tensorArrayClose: TensorFlow.GenOps.Core
tensorArrayConcat: TensorFlow.GenOps.Core
tensorArrayGather: TensorFlow.GenOps.Core
tensorArrayGrad: TensorFlow.GenOps.Core
tensorArrayPack: TensorFlow.GenOps.Core
tensorArrayRead: TensorFlow.GenOps.Core
tensorArrayScatter: TensorFlow.GenOps.Core
tensorArraySize: TensorFlow.GenOps.Core
tensorArraySplit: TensorFlow.GenOps.Core
tensorArrayUnpack: TensorFlow.GenOps.Core
tensorArrayWrite: TensorFlow.GenOps.Core
tensorAttr: TensorFlow.Tensor
tensorContent: Proto.Tensorflow.Core.Framework.Tensor
TensorData
  1 (Data Constructor): TensorFlow.Types
  2 (Type/Class): TensorFlow.Types
  3 (Data Constructor): TensorFlow.Internal.FFI
  4 (Type/Class): TensorFlow.Internal.FFI
tensorDataBytes: TensorFlow.Internal.FFI
tensorDataDimensions: TensorFlow.Internal.FFI
tensorDataType: TensorFlow.Internal.FFI
TensorFlowException
  1 (Data Constructor): TensorFlow.Internal.FFI
  2 (Type/Class): TensorFlow.Internal.FFI
tensorFromName: TensorFlow.Tensor
TensorKind: TensorFlow.Tensor
tensorKind: TensorFlow.Tensor
tensorOutput: TensorFlow.Tensor
TensorProto
  1 (Data Constructor): Proto.Tensorflow.Core.Framework.Tensor
  2 (Type/Class): Proto.Tensorflow.Core.Framework.Tensor
tensorRefType: TensorFlow.Types
tensorShape: Proto.Tensorflow.Core.Framework.Tensor
TensorShapeProto
  1 (Data Constructor): Proto.Tensorflow.Core.Framework.TensorShape
  2 (Type/Class): Proto.Tensorflow.Core.Framework.TensorShape
TensorShapeProto'Dim
  1 (Data Constructor): Proto.Tensorflow.Core.Framework.TensorShape
  2 (Type/Class): Proto.Tensorflow.Core.Framework.TensorShape
tensorSummary: TensorFlow.GenOps.Core
TensorType: TensorFlow.Types
tensorType: TensorFlow.Types
TensorTypes: TensorFlow.Types
tensorVal: TensorFlow.Types
testImageData: TensorFlow.Examples.MNIST.InputData
testLabelData: TensorFlow.Examples.MNIST.InputData
textLineReader: TensorFlow.GenOps.Core
tFRecordReader: TensorFlow.GenOps.Core
ThreadPoolOptionProto
  1 (Data Constructor): Proto.Tensorflow.Core.Protobuf.Config
  2 (Type/Class): Proto.Tensorflow.Core.Protobuf.Config
threadUnsafeUnigramCandidateSampler: TensorFlow.GenOps.Core
tile: TensorFlow.GenOps.Core
tileGrad: TensorFlow.GenOps.Core
timelineStep: Proto.Tensorflow.Core.Protobuf.Config
timeoutInMs: Proto.Tensorflow.Core.Protobuf.Config
topK
  1 (Function): TensorFlow.GenOps.Core
  2 (Function): TensorFlow.Ops
topKV2: TensorFlow.GenOps.Core
traceLevel: Proto.Tensorflow.Core.Protobuf.Config
trainingImageData: TensorFlow.Examples.MNIST.InputData
trainingLabelData: TensorFlow.Examples.MNIST.InputData
transpose
  1 (Function): TensorFlow.GenOps.Core
  2 (Function): TensorFlow.Ops
truncatedNormal
  1 (Function): TensorFlow.GenOps.Core
  2 (Function): TensorFlow.Ops
type'
  1 (Function): Proto.Tensorflow.Core.Framework.OpDef
  2 (Function): Proto.Tensorflow.Core.Framework.AttrValue
typeAttr: Proto.Tensorflow.Core.Framework.OpDef
TypeError: TensorFlow.Types
typeListAttr: Proto.Tensorflow.Core.Framework.OpDef
\ No newline at end of file
diff --git a/docs/haddock/doc-index-U.html b/docs/haddock/doc-index-U.html
new file mode 100644
index 0000000..e9ab94a
--- /dev/null
+++ b/docs/haddock/doc-index-U.html
@@ -0,0 +1,4 @@
+ (Index - U)

 

\ No newline at end of file
diff --git a/docs/haddock/doc-index-V.html b/docs/haddock/doc-index-V.html
new file mode 100644
index 0000000..70ac6d0
--- /dev/null
+++ b/docs/haddock/doc-index-V.html
@@ -0,0 +1,4 @@
+ (Index - V)

 

\ No newline at end of file
diff --git a/docs/haddock/doc-index-W.html b/docs/haddock/doc-index-W.html
new file mode 100644
index 0000000..1b704d9
--- /dev/null
+++ b/docs/haddock/doc-index-W.html
@@ -0,0 +1,4 @@
+ (Index - W)

 

\ No newline at end of file
diff --git a/docs/haddock/doc-index-Z.html b/docs/haddock/doc-index-Z.html
new file mode 100644
index 0000000..473d679
--- /dev/null
+++ b/docs/haddock/doc-index-Z.html
@@ -0,0 +1,4 @@
+ (Index - Z)

 

Index - Z

zeroInitializedVariable: TensorFlow.Ops
zeros: TensorFlow.Ops
zerosLike
  1 (Function): TensorFlow.GenOps.Core
  2 (Function): TensorFlow.Ops
zeta: TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index.html b/docs/haddock/doc-index.html
new file mode 100644
index 0000000..07ad04d
--- /dev/null
+++ b/docs/haddock/doc-index.html
@@ -0,0 +1,4 @@
+ (Index)

 

\ No newline at end of file
diff --git a/docs/haddock/frames.html b/docs/haddock/frames.html
new file mode 100644
index 0000000..1b4e38d
(generated Haddock frames page, 30 lines; HTML markup lost in extraction)
diff --git a/docs/haddock/haddock-util.js b/docs/haddock/haddock-util.js
new file mode 100644
index 0000000..9a6fccf
(standard Haddock utility script, 344 lines: CSS class helpers, collapsible sections remembered in a cookie, quick/full index search, frames support, and the style-sheet menu)
diff --git a/docs/haddock/hslogo-16.png b/docs/haddock/hslogo-16.png
new file mode 100644
GIT binary patch: literal 1684 bytes (binary data omitted)
diff --git a/docs/haddock/index-frames.html b/docs/haddock/index-frames.html
new file mode 100644
(generated Haddock page; HTML markup lost in extraction)
diff --git a/docs/haddock/index.html b/docs/haddock/index.html
new file mode 100644
index 0000000..08c86c5
(generated Haddock page; HTML markup lost in extraction)
diff --git a/docs/haddock/minus.gif b/docs/haddock/minus.gif
new file mode 100644
GIT binary patch: literal 56 bytes (binary data omitted)
diff --git a/docs/haddock/ocean.css b/docs/haddock/ocean.css
new file mode 100644
(standard Haddock "Ocean" stylesheet, 600 lines; only fragments survived extraction)
diff --git a/docs/haddock/plus.gif b/docs/haddock/plus.gif
new file mode 100644
GIT binary patch: literal 59 bytes (binary data omitted)
diff --git a/docs/haddock/synopsis.png b/docs/haddock/synopsis.png
new file mode 100644
GIT binary patch: literal 11327 bytes (binary data omitted)
diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html
new file mode 100644
index 0000000..24b3670
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html
@@ -0,0 +1,27 @@
+TensorFlow.Build

    tensorflow-0.1.0.0: TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Build

    Graph node types

    newtype ControlNode Source

A type of graph node which has no outputs. These nodes are valuable for causing side effects when they are run.

    Constructors

    ControlNode 

    Fields

    unControlNode :: Op
     

    Ops

    The Build monad

    render :: Tensor v a -> Build (Tensor v a) Source

Render a Tensor, fixing its name, scope, device and control inputs from the Build context. Also renders any dependencies of the Tensor that weren't already rendered.

This operation is idempotent; render >=> render === render. However, rendering a (previously un-rendered) Tensor in two different contexts may result in two different Tensors.
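A minimal sketch (assuming TF.scalar, the constant builder that this package's index lists in TensorFlow.Ops):

    {-# LANGUAGE OverloadedStrings #-}
    import TensorFlow.Build (Build, render, withNameScope)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.Ops as TF

    -- Fix the constant's name, scope and device under "init";
    -- rendering the result a second time would be a no-op.
    pinned :: Build (Tensor Value Float)
    pinned = withNameScope "init" (render (TF.scalar 1))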

    renderNodeName :: Tensor v a -> Build NodeName Source

    Render a Tensor and get its node's name.

    data BuildT m a Source

An action for building nodes in a TensorFlow graph. Used to manage build state internally as part of the Session monad.

    type Build = BuildT Identity Source

    An action for building nodes in a TensorFlow graph.

    addInitializer :: ControlNode -> Build () Source

Registers the given node to be executed before the next run.

    hoistBuildT :: (forall a. m a -> n a) -> BuildT m b -> BuildT n b Source

    This is Control.Monad.Morph.hoist sans the dependency.

    evalBuildT :: Monad m => BuildT m a -> m a Source

    asGraphDef :: Build a -> GraphDef Source

Produce a GraphDef proto representation of the nodes that are rendered in the given Build action.
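For example, a sketch (the GraphDef proto module path below is an assumption; the indexes shown here only confirm the Tensor, TensorShape, AttrValue, OpDef and Config proto modules):

    import TensorFlow.Build (asGraphDef, render)
    import qualified TensorFlow.Ops as TF
    import Proto.Tensorflow.Core.Framework.Graph (GraphDef)  -- assumed path

    -- The proto contains exactly the nodes rendered by the action.
    graph :: GraphDef
    graph = asGraphDef (render (TF.scalar (42 :: Float)))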

    flushInitializers :: Monad m => BuildT m [NodeName] Source

Get all the initializers that have accumulated so far, and clear that buffer.

    flushNodeBuffer :: Monad m => BuildT m [NodeDef] Source

    Get all the NodeDefs that have accumulated so far, and clear that buffer.

    Creating and looking up Ops

    getOrAddOp :: Op -> Build NodeName Source

Render the given op if it hasn't been rendered already, and return its name.

    addNewOp :: OpDef -> Build NodeDef Source

Add a new node for a given OpDef. This is used for making "stateful" ops which are not safe to dedup (e.g., "variable" and "assign").

    renderOutput :: Output -> Build Text Source

Render an Output and return a string representation for the TensorFlow foreign APIs.

    Modifying all nodes in a Build action

    colocateWith :: forall a v b. Tensor v b -> Build a -> Build a Source

Places all nodes rendered in the given Build action on the same device as the given Tensor (see also withDevice). Make sure that the action has side effects of rendering the desired tensors. A pure return would not have the desired effect.

    withStateLens :: MonadState s m => Lens' s a -> (a -> a) -> m b -> m b Source

Modify some part of the state, run an action, and restore the state after that action is done.

    withDevice :: Maybe Device -> Build a -> Build a Source

Set a device for all nodes rendered in the given Build action (unless further overridden by another use of withDevice).

    withNameScope :: Text -> Build a -> Build a Source

    Prepend a scope to all nodes rendered in the given Build action.
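Combining the two modifiers, a hedged sketch (the device string is only illustrative):

    {-# LANGUAGE OverloadedStrings #-}
    import TensorFlow.Build (Build, render, withDevice, withNameScope)
    import TensorFlow.Output (Device (..))
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.Ops as TF

    -- Nodes rendered inside get the "layer1/" prefix and are pinned
    -- to the given device.
    onDevice :: Build (Tensor Value Float)
    onDevice =
        withDevice (Just (Device "/job:worker/gpu:0")) $
        withNameScope "layer1" $
        render (TF.scalar 0)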

    withNodeDependencies :: Set NodeName -> Build a -> Build a Source

    Add control inputs to all nodes rendered in the given Build action.

    Internal Summary related bits.

    addSummary :: SummaryTensor -> Build () Source

Records the given summary action in Build for retrieval with collectAllSummaries. The summary op is required to produce a Summary protocol buffer in string form. For safety, use the pre-composed functions: Logging.scalarSummary and Logging.histogramSummary.

    type SummaryTensor = Tensor Value ByteString Source

    Synonym for the tensors that return serialized Summary proto.

    collectAllSummaries :: Monad m => BuildT m [SummaryTensor] Source

Retrieves the summary ops collected thus far. Typically this only happens once, but if buildWithSummary is used repeatedly, the values accumulate.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html
new file mode 100644
index 0000000..261e9ba
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html
@@ -0,0 +1,6 @@
+TensorFlow.BuildOp

    tensorflow-0.1.0.0: TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.BuildOp

    Synopsis

    Documentation

    class OpResult a Source

    Class of types that can be used as op outputs.

    Minimal complete definition

    toResult

    Instances

    OpResult ControlNode Source 
    OpResult a => OpResult [a] Source 
    (OpResult a1, OpResult a2) => OpResult (a1, a2) Source 
    OpResult (Tensor Ref a) Source 
    OpResult (Tensor Value a) Source 
    (OpResult a1, OpResult a2, OpResult a3) => OpResult (a1, a2, a3) Source 
    (OpResult a1, OpResult a2, OpResult a3, OpResult a4) => OpResult (a1, a2, a3, a4) Source 
    (OpResult a1, OpResult a2, OpResult a3, OpResult a4, OpResult a5) => OpResult (a1, a2, a3, a4, a5) Source 
    (OpResult a1, OpResult a2, OpResult a3, OpResult a4, OpResult a5, OpResult a6) => OpResult (a1, a2, a3, a4, a5, a6) Source 

    class BuildOp f Source

    Class of types that can be used as op functions.

    Minimal complete definition

    buildOp'

    Instances

    BuildOp ControlNode Source 
    BuildOp [Tensor Value a] Source 
    OpResult a => BuildOp (Build a) Source 
    BuildOp f => BuildOp ([Tensor v a] -> f) Source 
    BuildOp f => BuildOp (Tensor v a -> f) Source 
    (OpResult t1, OpResult t2) => BuildOp (t1, t2) Source 
    BuildOp (Tensor Ref a) Source 
    BuildOp (Tensor Value a) Source 
    (OpResult t1, OpResult t2, OpResult t3) => BuildOp (t1, t2, t3) Source 
    (OpResult t1, OpResult t2, OpResult t3, OpResult t4) => BuildOp (t1, t2, t3, t4) Source 
    (OpResult t1, OpResult t2, OpResult t3, OpResult t4, OpResult t5) => BuildOp (t1, t2, t3, t4, t5) Source 
    (OpResult t1, OpResult t2, OpResult t3, OpResult t4, OpResult t5, OpResult t6) => BuildOp (t1, t2, t3, t4, t5, t6) Source 

    buildOp :: BuildOp f => OpDef -> f Source

Starts an operation that returns a structured set of tensors (singletons or tuples).
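A hedged sketch of a unary wrapper, patterned after the generated code in TensorFlow.GenOps.Core (the lens-family operators are assumed; opDef, opAttr and tensorType appear in this package's index):

    {-# LANGUAGE OverloadedStrings #-}
    import Lens.Family2 ((&), (.~))
    import TensorFlow.Build (opAttr, opDef)
    import TensorFlow.BuildOp (buildOp)
    import TensorFlow.Tensor (Tensor, Value)
    import TensorFlow.Types (tensorType)

    -- BuildOp's instances let the same call consume the input tensor
    -- and produce the single output tensor.
    neg :: Tensor v Float -> Tensor Value Float
    neg = buildOp (opDef "Neg" & opAttr "T" .~ tensorType (undefined :: Float))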

    buildListOp Source

    Arguments

    :: BuildOp f 
    => [Int64]

Cardinality of the corresponding list of output tensors.

    -> OpDef 
    -> f 

    Starts an operation that returns a list of tensors.

    eqLengthGuard :: [(String, [(String, Int)])] -> Bool Source

Returns true if all the integers in each tuple are identical. Throws an error with a descriptive message if not.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html
new file mode 100644
index 0000000..bfd5f5b
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html
@@ -0,0 +1,9 @@
+TensorFlow.ControlFlow

    tensorflow-0.1.0.0: TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.ControlFlow

    Synopsis

    Dependencies

    withControlDependencies :: Nodes t => t -> Build a -> Build a Source

Modify a Build action, such that all new ops rendered in it will depend on the nodes in the first argument.

    group :: Nodes t => t -> Build ControlNode Source

    Create an op that groups multiple operations.

When this op finishes, all ops in the input n have finished. This op has no output.
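For example (a sketch; the constants merely stand in for side-effecting ops):

    import TensorFlow.Build (Build, render)
    import TensorFlow.ControlFlow (group)
    import TensorFlow.Output (ControlNode)
    import qualified TensorFlow.Ops as TF

    -- A node that finishes only after both rendered inputs have run.
    barrier :: Build ControlNode
    barrier = do
        a <- render (TF.scalar (1 :: Float))
        b <- render (TF.scalar (2 :: Float))
        group (a, b)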

    Operations

    identity :: TensorType a => Tensor v a -> Tensor v a Source

    Returns a Tensor with the same shape and contents as the input.

    noOp :: ControlNode Source

    Does nothing. Only useful as a placeholder for control edges.

    named :: TensorType a => Text -> Tensor v a -> Tensor v a Source

Returns a Tensor with a given name and the same shape and contents as the input.

TODO(judahjacobson): This breaks when used with uninitialized Tensor Refs, since RefIdentity doesn't have SetAllowsUninitializedInput(). Look into whether we can change that op.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html
new file mode 100644
index 0000000..5c669e5
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html
@@ -0,0 +1,8 @@
+TensorFlow.Internal.FFI

    tensorflow-0.1.0.0: TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Internal.FFI

    Synopsis

    Documentation

    withSession Source

    Arguments

    :: (SessionOptions -> IO ()) 
    -> ((IO () -> IO ()) -> Session -> IO a)

The action can spawn concurrent tasks which will be canceled before withSession returns.

    -> IO a 

Runs the given action after creating a session with options populated by the given optionSetter.

    run Source

    Arguments

    :: Session 
    -> [(ByteString, TensorData)]

    Feeds.

    -> [ByteString]

    Fetches.

    -> [ByteString]

    Targets.

    -> IO [TensorData] 

    data TensorData Source

    All of the data needed to represent a tensor.

    setSessionConfig :: ConfigProto -> SessionOptions -> IO () Source

    setSessionTarget :: ByteString -> SessionOptions -> IO () Source

    getAllOpList :: IO ByteString Source

Returns the serialized OpList of all OpDefs defined in this address space.

    Internal helper.

    useProtoAsVoidPtrLen :: (Message msg, Num c) => msg -> (Ptr b -> c -> IO a) -> IO a Source

Serializes the given msg and provides it as (ptr,len) argument to the given action.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html
new file mode 100644
index 0000000..14c3dcc
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html
@@ -0,0 +1,4 @@
+TensorFlow.Internal.VarInt

    tensorflow-0.1.0.0: TensorFlow bindings.

Safe Haskell: Safe
Language: Haskell2010

    TensorFlow.Internal.VarInt

    Description

    Originally taken from internal proto-lens code.

    Documentation

    getVarInt :: Parser Word64 Source

    Decode an unsigned varint.

    putVarInt :: Word64 -> Builder Source

    Encode a Word64.
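A round-trip sketch (assuming the Parser here is attoparsec's and the Builder is bytestring's, as in proto-lens):

    import Data.Attoparsec.ByteString (parseOnly)
    import qualified Data.ByteString.Builder as Builder
    import qualified Data.ByteString.Lazy as BL
    import Data.Word (Word64)
    import TensorFlow.Internal.VarInt (getVarInt, putVarInt)

    -- 300 is encoded low seven bits first, with the continuation bit
    -- set on all but the last byte: 0xAC 0x02.
    roundTrip :: Either String Word64
    roundTrip = parseOnly getVarInt
        (BL.toStrict (Builder.toLazyByteString (putVarInt 300)))
    -- evaluates to Right 300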

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html
new file mode 100644
index 0000000..5971080
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html
@@ -0,0 +1,6 @@
+TensorFlow.Nodes

    tensorflow-0.1.0.0: TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Nodes

    Synopsis

    Documentation

    class Nodes t where Source

    Types that contain ops which can be run.

    Methods

    getNodes :: t -> Build (Set NodeName) Source

    Instances

    Nodes ControlNode Source 
    Nodes t => Nodes [t] Source 
    (Nodes t1, Nodes t2) => Nodes (t1, t2) Source 
    Nodes (Tensor v a) Source 
    (Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) Source 

    class Nodes t => Fetchable t a where Source

Types that tensor representations (e.g. Tensor, ControlNode) can be fetched into.

    Includes collections of tensors (e.g. tuples).

    Methods

    getFetch :: t -> Build (Fetch a) Source

    Instances

a ~ () => Fetchable ControlNode a Source
Fetchable t a => Fetchable [t] [a] Source
(TensorType a, a ~ a') => Fetchable (Tensor v a) (Scalar a') Source
(TensorType a, a ~ a') => Fetchable (Tensor v a) (Vector a') Source
    (Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) Source 
    (Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3) => Fetchable (t1, t2, t3) (a1, a2, a3) Source 
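The instance is picked by the type you fetch into; a sketch (TF.scalar and TF.vector assumed from TensorFlow.Ops, and Session's MonadIO instance assumed for the print):

    import Control.Monad.IO.Class (liftIO)
    import Data.Vector (Vector)
    import TensorFlow.Nodes (Scalar (..))
    import qualified TensorFlow.Ops as TF
    import TensorFlow.Session (run, runSession)

    main :: IO ()
    main = runSession $ do
        x <- run (TF.scalar (3 :: Float))        -- Scalar instance
        v <- run (TF.vector [1, 2, 3 :: Float])  -- Vector instance
        liftIO (print (unScalar (x :: Scalar Float), v :: Vector Float))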

    data Fetch a Source

Fetch action. Keeps track of what needs to be fetched and how to decode the fetched data.

    Constructors

    Fetch 

    Fields

    fetches :: Set Text

    Nodes to fetch

    fetchRestore :: Map Text TensorData -> a

    Function to create an a from the fetched data.

    nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b Source

    fetchTensorVector :: forall a v. TensorType a => Tensor v a -> Build (Fetch (Shape, Vector a)) Source

    newtype Scalar a Source

    Constructors

    Scalar 

    Fields

    unScalar :: a
     

    Instances

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html
new file mode 100644
index 0000000..4584e8a
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html
@@ -0,0 +1,14 @@
+TensorFlow.Output

    tensorflow-0.1.0.0: TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Output

    Contents

    Documentation

    newtype ControlNode Source

A type of graph node which has no outputs. These nodes are valuable for causing side effects when they are run.

    Constructors

    ControlNode 

    Fields

    unControlNode :: Op
     

    newtype Device Source

A device that a node can be assigned to. There's a naming convention where the device names are constructed from job and replica names.

    Constructors

    Device 

    Fields

    deviceName :: Text
     

    Ops

    newtype NodeName Source

The name of a node in the graph. This corresponds to the proto field NodeDef.name. Includes the scope prefix (if any) and a unique identifier (if the node was implicitly named).

    Constructors

    NodeName 

    Fields

    unNodeName :: Text
     

    data Op Source

    The representation of a node in a TensorFlow graph.

    Constructors

    Rendered !NodeDef

Properties are fixed, including the device, name, and scope.

    Unrendered !OpDef

Properties are not fixed, and may change depending on which context this op is rendered in.

    opUnrendered :: Traversal' Op OpDef Source

    Traverse on the Unrendered of an Op.

    Same implementation as _Left.

    data OpDef Source

    Op definition. This corresponds somewhat to the NodeDef proto.

    newtype OpType Source

The type of op of a node in the graph. This corresponds to the proto field NodeDef.op.

    Constructors

    OpType 

    Fields

    unOpType :: Text
     

    data Output Source

    An output of a TensorFlow node.

    Constructors

    Output !OutputIx !Op 

    data PendingNodeName Source

The name specified for an unrendered Op. If an Op has an ImplicitName, it will be assigned based on the opType plus a unique identifier. Does not contain the "scope" prefix.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html
new file mode 100644
index 0000000..5d52b86
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html
@@ -0,0 +1,23 @@
+TensorFlow.Session

    tensorflow-0.1.0.0: TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Session

    Documentation

    Opaque value created via sessionConfig and sessionTarget.

    data SessionOption Source

    Setting of an option for the session (see runSessionWithOptions).

    sessionConfig :: ConfigProto -> SessionOption Source

    Uses the specified config for the created session.

    sessionTarget :: ByteString -> SessionOption Source

Target can be: "local", ip:port, host:port. The set of supported factories depends on the linked-in libraries. REQUIRES "/learningbrain/public:tensorflow_remote" dependency for the binary.

    runSession :: Session a -> IO a Source

    Run Session actions in a new TensorFlow session.

    runSessionWithOptions :: [SessionOption] -> Session a -> IO a Source

Run Session actions in a new TensorFlow session created with the given option setter actions (sessionTarget, sessionConfig).
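For example (the target string is illustrative; OverloadedStrings supplies the ByteString literal):

    {-# LANGUAGE OverloadedStrings #-}
    import TensorFlow.Session (runSessionWithOptions, sessionTarget)

    main :: IO ()
    main = runSessionWithOptions [sessionTarget "localhost:2222"] (return ())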

    build :: Build a -> Session a Source

Lift a Build action into a Session, including any explicit op renderings.

    buildAnd :: (a -> Session b) -> Build a -> Session b Source

Helper combinator for doing something with the result of a Build action. Example usage:

    buildAnd run :: Fetchable t a => Build t -> Session a

    buildWithSummary :: forall a. Build a -> Session (a, [SummaryTensor]) Source

Lift a Build action into a Session, including any explicit op renderings. Returns the merged summary ops which can be used for logging, see build for a convenient wrapper.

    extend :: Session () Source

Adds all pending rendered nodes to the TensorFlow graph and runs any pending initializers.

    Note that run, runWithFeeds, etc. will all call this function implicitly.

    run :: Fetchable t a => t -> Session a Source

Run a subgraph t, rendering any dependent nodes that aren't already rendered, and fetch the corresponding values for a.

    runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a Source

Run a subgraph t, rendering any dependent nodes that aren't already rendered, feed the given input values, and fetch the corresponding result values for a.
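A feeding sketch (TF.placeholder is assumed from TensorFlow.Ops; being a Build action, it is lifted with build):

    import Control.Monad.IO.Class (liftIO)
    import qualified Data.Vector as V
    import qualified TensorFlow.Ops as TF
    import TensorFlow.Session (build, runSession, runWithFeeds)
    import TensorFlow.Tensor (feed)
    import TensorFlow.Types (Shape (..), encodeTensorData)

    main :: IO ()
    main = runSession $ do
        x <- build (TF.placeholder (Shape [2]))
        let d = encodeTensorData (Shape [2]) (V.fromList [5, 6 :: Float])
        v <- runWithFeeds [feed x d] x
        liftIO (print (v :: V.Vector Float))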

    run_ :: Nodes t => t -> Session () Source

Run a subgraph t, rendering and extending any dependent nodes that aren't already rendered. This behaves like run except that it doesn't do any fetches.

    runWithFeeds_ :: Nodes t => [Feed] -> t -> Session () Source

Run a subgraph t, rendering any dependent nodes that aren't already rendered, and feed the given input values. This behaves like runWithFeeds except that it doesn't do any fetches.

    asyncProdNodes Source

    Arguments

    :: Nodes t 
    => t

    Node to evaluate concurrently.

    -> Session () 

Starts a concurrent thread which evaluates the given Nodes forever until runSession exits or an exception occurs. Graph extension happens synchronously, but the resultant run proceeds as a separate thread.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html
new file mode 100644
index 0000000..950e4c4
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html
@@ -0,0 +1,17 @@
+TensorFlow.Tensor

    tensorflow-0.1.0.0: TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Tensor

    Documentation

    data Tensor v a Source

    A named output of a TensorFlow operation.

The type parameter a is the type of the elements in the Tensor. The parameter v is either Value or Ref, depending on whether the graph is treating this op output as an immutable Value or a stateful Ref (e.g., a variable). Note that a Tensor Ref can be cast into a Tensor Value via value.

    Constructors

    Tensor (TensorKind v) Output 

    Instances

    data TensorKind v where Source

This class provides a runtime switch on whether a Tensor should be treated as a Value or as a Ref.

    tensorAttr :: Attribute attr => Text -> Traversal' (Tensor v a) attr Source

    Lens for the attributes of a tensor.

Only valid if the tensor has not yet been rendered. If the tensor has been rendered, the traversal will be over nothing (nothing can be read or written).

    value :: Tensor v a -> Tensor Value a Source

Cast a 'Tensor *' into a 'Tensor Value'. Common usage is to cast a Ref into Value. This behaves like a no-op.

    data Feed Source

A pair of a Tensor and some data that should be fed into that Tensor when running the graph.

    Constructors

    Feed Output TensorData 

    feed :: Tensor v a -> TensorData a -> Feed Source

Create a Feed for feeding the given data into a Tensor when running the graph.

Note that if a Tensor is rendered, its identity may change; so feeding the rendered Tensor may be different than feeding the original Tensor.

    tensorFromName :: TensorKind v -> Text -> Tensor v a Source

Create a Tensor for a given name. This can be used to reference nodes in a GraphDef that was loaded via addGraphDef. TODO(judahjacobson): add more safety checks here.
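For example (a sketch; the node name is illustrative):

    {-# LANGUAGE OverloadedStrings #-}
    import TensorFlow.Tensor (Tensor, TensorKind (..), Value, tensorFromName)

    -- Refers to an existing node named "logits" in the loaded graph.
    logits :: Tensor Value Float
    logits = tensorFromName ValueKind "logits"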

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html
new file mode 100644
index 0000000..28057b4
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html
@@ -0,0 +1,13 @@
+TensorFlow.Types

    tensorflow-0.1.0.0: TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Types

    Synopsis

    Documentation

    class TensorType a where Source

    The class of scalar types supported by tensorflow.

    Methods

    tensorType :: a -> DataType Source

    tensorRefType :: a -> DataType Source

    tensorVal :: Lens' TensorProto [a] Source

    decodeTensorData :: TensorData a -> Vector a Source

    Decode the bytes of a TensorData into a Vector.

    encodeTensorData :: Shape -> Vector a -> TensorData a Source

    Encode a Vector into a TensorData.

    The values should be in row major order, e.g.,

element 0:  index (0, ..., 0)
element 1:  index (0, ..., 1)
...
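Concretely, a 2x3 matrix laid out in row-major order:

    import qualified Data.Vector as V
    import TensorFlow.Types (Shape (..), TensorData, encodeTensorData)

    -- [[1, 2, 3],
    --  [4, 5, 6]]
    matrix :: TensorData Float
    matrix = encodeTensorData (Shape [2, 3]) (V.fromList [1, 2, 3, 4, 5, 6])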

    newtype TensorData a Source

    Data about a tensor that is encoded for the TensorFlow APIs.

    Constructors

    TensorData 

    newtype Shape Source

    Shape (dimensions) of a tensor.

    Constructors

    Shape [Int64] 

    Type constraints

    type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a) Source

    A Constraint specifying the possible choices of a TensorType.

We implement a Constraint like OneOf '[Double, Float] a by turning the natural representation, a disjunction, i.e.,

       a == Double || a == Float

into a conjunction like

        a /= Int32 && a /= Int64 && a /= ByteString && ...

    using an enumeration of all the possible TensorTypes.
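At a use site this reads as an ordinary constraint; a sketch (the extensions listed are what a constraint synonym over a type-level list typically needs):

    {-# LANGUAGE ConstraintKinds, DataKinds, FlexibleContexts #-}
    import TensorFlow.Types (OneOf)

    -- Compiles for Float or Double only; instantiating a at, say,
    -- Int32 is rejected via the TypeError/ExcludedCase machinery below.
    restrict :: OneOf '[Double, Float] a => a -> a
    restrict = id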

    type family a /= b :: Constraint Source

    A constraint checking that two types are different.

    Equations

    a /= a = TypeError a ~ ExcludedCase 
    a /= b = () 

    Implementation of constraints

    data TypeError a Source

Helper types to produce a reasonable type error message when the Constraint "a /= a" fails. TODO(judahjacobson): Use ghc-8's CustomTypeErrors for this.

    type family TensorTypes ts :: Constraint Source

A Constraint checking that the input is a list of TensorTypes. Helps improve error messages when using OneOf.

    Equations

    TensorTypes `[]` = () 
    TensorTypes (t : ts) = (TensorType t, TensorTypes ts) 

    type family NoneOf ts a :: Constraint Source

A constraint that the type a doesn't appear in the type list ts. Assumes that a and each of the elements of ts are TensorTypes.

    Equations

    NoneOf `[]` a = () 
    NoneOf (t : ts) a = (a /= t, NoneOf ts a) 

    type family as \\ bs Source

    Takes the difference of two lists of types.

    Equations

    as \\ `[]` = as 
    as \\ (b : bs) = Delete b as \\ bs 

    type family Delete a as Source

    Removes a type from the given list of types.

    Equations

    Delete a `[]` = `[]` 
    Delete a (a : as) = Delete a as 
    Delete a (b : as) = b : Delete a as 

    type AllTensorTypes = `[Float, Double, Int8, Int16, Int32, Int64, Word8, Word16, ByteString, Bool]` Source

    An enumeration of all valid TensorTypes.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-0.1.0.0/doc-index.html
new file mode 100644
index 0000000..dc781ec
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/doc-index.html
@@ -0,0 +1,4 @@
+tensorflow-0.1.0.0: TensorFlow bindings. (Index)

    tensorflow-0.1.0.0: TensorFlow bindings.

    Index

/=: TensorFlow.Types
addGraphDef: TensorFlow.Build, TensorFlow.Session
addInitializer: TensorFlow.Build
addNewOp: TensorFlow.Build
addSummary: TensorFlow.Build
AllTensorTypes: TensorFlow.Types
asGraphDef: TensorFlow.Build
asyncProdNodes: TensorFlow.Session
Attribute: TensorFlow.Types
attrLens: TensorFlow.Types
Build: TensorFlow.Build
build: TensorFlow.Session
buildAnd: TensorFlow.Session
buildListOp: TensorFlow.BuildOp
BuildOp: TensorFlow.BuildOp
buildOp: TensorFlow.BuildOp
BuildT: TensorFlow.Build
buildWithSummary: TensorFlow.Session
collectAllSummaries: TensorFlow.Build
colocateWith: TensorFlow.Build
ControlNode
  1 (Type/Class): TensorFlow.Output, TensorFlow.Build
  2 (Data Constructor): TensorFlow.Output, TensorFlow.Build
decodeTensorData: TensorFlow.Types
Delete: TensorFlow.Types
Device
  1 (Type/Class): TensorFlow.Output
  2 (Data Constructor): TensorFlow.Output
deviceName: TensorFlow.Output
encodeTensorData: TensorFlow.Types
eqLengthGuard: TensorFlow.BuildOp
evalBuildT: TensorFlow.Build
ExcludedCase: TensorFlow.Types
ExplicitName: TensorFlow.Output
explicitName: TensorFlow.Build
extend: TensorFlow.Session
extendGraph: TensorFlow.Internal.FFI
Feed
  1 (Type/Class): TensorFlow.Tensor
  2 (Data Constructor): TensorFlow.Tensor
feed: TensorFlow.Tensor
Fetch
  1 (Type/Class): TensorFlow.Nodes
  2 (Data Constructor): TensorFlow.Nodes
Fetchable: TensorFlow.Nodes
fetches: TensorFlow.Nodes
fetchRestore: TensorFlow.Nodes
fetchTensorList: TensorFlow.Nodes
fetchTensorVector: TensorFlow.Nodes
flushInitializers: TensorFlow.Build
flushNodeBuffer: TensorFlow.Build
getAllOpList: TensorFlow.Internal.FFI
getFetch: TensorFlow.Nodes
getNodes: TensorFlow.Nodes
getOrAddOp: TensorFlow.Build
getVarInt: TensorFlow.Internal.VarInt
GraphState: TensorFlow.Build
group: TensorFlow.ControlFlow
hoistBuildT: TensorFlow.Build
identity: TensorFlow.ControlFlow
ImplicitName: TensorFlow.Output
implicitName: TensorFlow.Build
named: TensorFlow.ControlFlow
NodeName
  1 (Type/Class): TensorFlow.Output
  2 (Data Constructor): TensorFlow.Output
Nodes: TensorFlow.Nodes
nodesUnion: TensorFlow.Nodes
NoneOf: TensorFlow.Types
noOp: TensorFlow.ControlFlow
OneOf: TensorFlow.Types
Op: TensorFlow.Output
opAttr: TensorFlow.Output, TensorFlow.Build
opControlInputs: TensorFlow.Output, TensorFlow.Build
OpDef
  1 (Type/Class): TensorFlow.Output
  2 (Data Constructor): TensorFlow.Output
opDef: TensorFlow.Build
opDefWithName: TensorFlow.Build
opInputs: TensorFlow.Output, TensorFlow.Build
opName: TensorFlow.Output, TensorFlow.Build
OpResult: TensorFlow.BuildOp
OpType
  1 (Type/Class): TensorFlow.Output
  2 (Data Constructor): TensorFlow.Output
opType: TensorFlow.Output, TensorFlow.Build
opUnrendered: TensorFlow.Output
Output
  1 (Type/Class): TensorFlow.Output
  2 (Data Constructor): TensorFlow.Output
output: TensorFlow.Output
outputIndex: TensorFlow.Output
OutputIx
  1 (Type/Class): TensorFlow.Output
  2 (Data Constructor): TensorFlow.Output
outputOp: TensorFlow.Output
PendingNodeName: TensorFlow.Output
protoShape: TensorFlow.Types
putVarInt: TensorFlow.Internal.VarInt
Ref: TensorFlow.Tensor
RefKind: TensorFlow.Tensor
render: TensorFlow.Build
Rendered: TensorFlow.Output
renderedNodeDefs: TensorFlow.Build
renderNodeName: TensorFlow.Build
renderOutput: TensorFlow.Build
run
  1 (Function): TensorFlow.Internal.FFI
  2 (Function): TensorFlow.Session
runBuildT: TensorFlow.Build
runSession: TensorFlow.Session
runSessionWithOptions: TensorFlow.Session
runWithFeeds: TensorFlow.Session
runWithFeeds_: TensorFlow.Session
run_: TensorFlow.Session
Scalar
  1 (Type/Class): TensorFlow.Nodes
  2 (Data Constructor): TensorFlow.Nodes
Session
  1 (Type/Class): TensorFlow.Internal.FFI
  2 (Type/Class): TensorFlow.Session
sessionConfig: TensorFlow.Session
SessionOption: TensorFlow.Session
sessionTarget: TensorFlow.Session
setSessionConfig: TensorFlow.Internal.FFI
setSessionTarget: TensorFlow.Internal.FFI
Shape
  1 (Type/Class): TensorFlow.Types
  2 (Data Constructor): TensorFlow.Types
SummaryTensor: TensorFlow.Build
Tensor
  1 (Type/Class): TensorFlow.Tensor
  2 (Data Constructor): TensorFlow.Tensor
tensorAttr: TensorFlow.Tensor
TensorData
  1 (Type/Class): TensorFlow.Internal.FFI
  2 (Data Constructor): TensorFlow.Internal.FFI
  3 (Type/Class): TensorFlow.Types
  4 (Data Constructor): TensorFlow.Types
tensorDataBytes: TensorFlow.Internal.FFI
tensorDataDimensions: TensorFlow.Internal.FFI
tensorDataType: TensorFlow.Internal.FFI
TensorFlowException
  1 (Type/Class): TensorFlow.Internal.FFI
  2 (Data Constructor): TensorFlow.Internal.FFI
tensorFromName: TensorFlow.Tensor
TensorKind: TensorFlow.Tensor
tensorKind: TensorFlow.Tensor
tensorOutput: TensorFlow.Tensor
tensorRefType: TensorFlow.Types
TensorType: TensorFlow.Types
tensorType: TensorFlow.Types
TensorTypes: TensorFlow.Types
tensorVal: TensorFlow.Types
TypeError: TensorFlow.Types
unControlNode: TensorFlow.Output, TensorFlow.Build
Unique: TensorFlow.Build
unNodeName: TensorFlow.Output
unOpType: TensorFlow.Output
unOutputIx: TensorFlow.Output
Unrendered: TensorFlow.Output
unScalar: TensorFlow.Nodes
unTensorData: TensorFlow.Types
useProtoAsVoidPtrLen: TensorFlow.Internal.FFI
Value: TensorFlow.Tensor
value: TensorFlow.Tensor
ValueKind: TensorFlow.Tensor
withControlDependencies: TensorFlow.ControlFlow
withDevice: TensorFlow.Build
withNameScope: TensorFlow.Build
withNodeDependencies: TensorFlow.Build
withSession: TensorFlow.Internal.FFI
withStateLens: TensorFlow.Build
\\: TensorFlow.Types
_opAttrs: TensorFlow.Output
_opControlInputs: TensorFlow.Output
_opInputs: TensorFlow.Output
_opName: TensorFlow.Output
_opType: TensorFlow.Output
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/frames.html b/docs/haddock/tensorflow-0.1.0.0/frames.html new file mode 100644 index 0000000..1b4e38d --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/frames.html @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + diff --git a/docs/haddock/tensorflow-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-0.1.0.0/haddock-util.js new file mode 100644 index 0000000..9a6fccf --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/haddock-util.js @@ -0,0 +1,344 @@ +// Haddock JavaScript utilities + +var rspace = /\s\s+/g, + rtrim = /^\s+|\s+$/g; + +function spaced(s) { return (" " + s + " ").replace(rspace, " "); } +function trim(s) { return s.replace(rtrim, ""); } + +function hasClass(elem, value) { + var className = spaced(elem.className || ""); + return className.indexOf( " " + value + " " ) >= 0; +} + +function addClass(elem, value) { + var className = spaced(elem.className || ""); + if ( className.indexOf( " " + value + " " ) < 0 ) { + elem.className = trim(className + " " + value); + } +} + +function removeClass(elem, value) { + var className = spaced(elem.className || ""); + className = className.replace(" " + value + " ", " "); + elem.className = trim(className); +} + +function toggleClass(elem, valueOn, valueOff, bool) { + if (bool == null) { bool = ! hasClass(elem, valueOn); } + if (bool) { + removeClass(elem, valueOff); + addClass(elem, valueOn); + } + else { + removeClass(elem, valueOn); + addClass(elem, valueOff); + } + return bool; +} + + +function makeClassToggle(valueOn, valueOff) +{ + return function(elem, bool) { + return toggleClass(elem, valueOn, valueOff, bool); + } +} + +toggleShow = makeClassToggle("show", "hide"); +toggleCollapser = makeClassToggle("collapser", "expander"); + +function toggleSection(id) +{ + var b = toggleShow(document.getElementById("section." + id)); + toggleCollapser(document.getElementById("control." + id), b); + rememberCollapsed(id, b); + return b; +} + +var collapsed = {}; +function rememberCollapsed(id, b) +{ + if(b) + delete collapsed[id] + else + collapsed[id] = null; + + var sections = []; + for(var i in collapsed) + { + if(collapsed.hasOwnProperty(i)) + sections.push(i); + } + // cookie specific to this page; don't use setCookie which sets path=/ + document.cookie = "collapsed=" + escape(sections.join('+')); +} + +function restoreCollapsed() +{ + var cookie = getCookie("collapsed"); + if(!cookie) + return; + + var ids = cookie.split('+'); + for(var i in ids) + { + if(document.getElementById("section." 
+ ids[i])) + toggleSection(ids[i]); + } +} + +function setCookie(name, value) { + document.cookie = name + "=" + escape(value) + ";path=/;"; +} + +function clearCookie(name) { + document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; +} + +function getCookie(name) { + var nameEQ = name + "="; + var ca = document.cookie.split(';'); + for(var i=0;i < ca.length;i++) { + var c = ca[i]; + while (c.charAt(0)==' ') c = c.substring(1,c.length); + if (c.indexOf(nameEQ) == 0) { + return unescape(c.substring(nameEQ.length,c.length)); + } + } + return null; +} + + + +var max_results = 75; // 50 is not enough to search for map in the base libraries +var shown_range = null; +var last_search = null; + +function quick_search() +{ + perform_search(false); +} + +function full_search() +{ + perform_search(true); +} + + +function perform_search(full) +{ + var text = document.getElementById("searchbox").value.toLowerCase(); + if (text == last_search && !full) return; + last_search = text; + + var table = document.getElementById("indexlist"); + var status = document.getElementById("searchmsg"); + var children = table.firstChild.childNodes; + + // first figure out the first node with the prefix + var first = bisect(-1); + var last = (first == -1 ? -1 : bisect(1)); + + if (first == -1) + { + table.className = ""; + status.innerHTML = "No results found, displaying all"; + } + else if (first == 0 && last == children.length - 1) + { + table.className = ""; + status.innerHTML = ""; + } + else if (last - first >= max_results && !full) + { + table.className = ""; + status.innerHTML = "More than " + max_results + ", press Search to display"; + } + else + { + // decide what you need to clear/show + if (shown_range) + setclass(shown_range[0], shown_range[1], "indexrow"); + setclass(first, last, "indexshow"); + shown_range = [first, last]; + table.className = "indexsearch"; + status.innerHTML = ""; + } + + + function setclass(first, last, status) + { + for (var i = first; i <= last; i++) + { + children[i].className = status; + } + } + + + // do a binary search, treating 0 as ... + // return either -1 (no 0's found) or location of most far match + function bisect(dir) + { + var first = 0, finish = children.length - 1; + var mid, success = false; + + while (finish - first > 3) + { + mid = Math.floor((finish + first) / 2); + + var i = checkitem(mid); + if (i == 0) i = dir; + if (i == -1) + finish = mid; + else + first = mid; + } + var a = (dir == 1 ? first : finish); + var b = (dir == 1 ? finish : first); + for (var i = b; i != a - dir; i -= dir) + { + if (checkitem(i) == 0) return i; + } + return -1; + } + + + // from an index, decide what the result is + // 0 = match, -1 is lower, 1 is higher + function checkitem(i) + { + var s = getitem(i).toLowerCase().substr(0, text.length); + if (s == text) return 0; + else return (s > text ? -1 : 1); + } + + + // from an index, get its string + // this abstracts over alternates + function getitem(i) + { + for ( ; i >= 0; i--) + { + var s = children[i].firstChild.firstChild.data; + if (s.indexOf(' ') == -1) + return s; + } + return ""; // should never be reached + } +} + +function setSynopsis(filename) { + if (parent.window.synopsis) { + if (parent.window.synopsis.location.replace) { + // In Firefox this avoids adding the change to the history. 
+ parent.window.synopsis.location.replace(filename); + } else { + parent.window.synopsis.location = filename; + } + } +} + +function addMenuItem(html) { + var menu = document.getElementById("page-menu"); + if (menu) { + var btn = menu.firstChild.cloneNode(false); + btn.innerHTML = html; + menu.appendChild(btn); + } +} + +function adjustForFrames() { + var bodyCls; + + if (parent.location.href == window.location.href) { + // not in frames, so add Frames button + addMenuItem("Frames"); + bodyCls = "no-frame"; + } + else { + bodyCls = "in-frame"; + } + addClass(document.body, bodyCls); +} + +function reframe() { + setCookie("haddock-reframe", document.URL); + window.location = "frames.html"; +} + +function postReframe() { + var s = getCookie("haddock-reframe"); + if (s) { + parent.window.main.location = s; + clearCookie("haddock-reframe"); + } +} + +function styles() { + var i, a, es = document.getElementsByTagName("link"), rs = []; + for (i = 0; a = es[i]; i++) { + if(a.rel.indexOf("style") != -1 && a.title) { + rs.push(a); + } + } + return rs; +} + +function addStyleMenu() { + var as = styles(); + var i, a, btns = ""; + for(i=0; a = as[i]; i++) { + btns += "
  • " + + a.title + "
  • " + } + if (as.length > 1) { + var h = "
    " + + "Style ▾" + + "
      " + btns + "
    " + + "
    "; + addMenuItem(h); + } +} + +function setActiveStyleSheet(title) { + var as = styles(); + var i, a, found; + for(i=0; a = as[i]; i++) { + a.disabled = true; + // need to do this always, some browsers are edge triggered + if(a.title == title) { + found = a; + } + } + if (found) { + found.disabled = false; + setCookie("haddock-style", title); + } + else { + as[0].disabled = false; + clearCookie("haddock-style"); + } + styleMenu(false); +} + +function resetStyle() { + var s = getCookie("haddock-style"); + if (s) setActiveStyleSheet(s); +} + + +function styleMenu(show) { + var m = document.getElementById('style-menu'); + if (m) toggleShow(m, show); +} + + +function pageLoad() { + addStyleMenu(); + adjustForFrames(); + resetStyle(); + restoreCollapsed(); +} + diff --git a/docs/haddock/tensorflow-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-0.1.0.0/hslogo-16.png new file mode 100644 index 0000000000000000000000000000000000000000..0ff8579fbd897417b0d6dad6e920f8882138a7c0 GIT binary patch literal 1684 zcmV;F25b3=P)4Tx0C)j~RL^S@K@|QrZmG~B2wH0nvUrdpNm;9CMbtL^5n^i$+aIn^?(HA4aZWV5ov6ELTdbo0FI&wK{O>*+w4vx20?>!`FrQsdJlnHR>OPy zcd~b_n$otK2Za4V;76L-DzNVtaSB-y0*E}{p()372;bw_^6ZZ}PI-92wGS&j#91PI zKs7DSe@(bk%_Y-7gGe}(^>I=@oY#w#*Bu9GZf3^F5WP>3rn}7Ut74&?PWBFvy`A)a zPP5)V!Xd&78LdA?xQ(9mjMYElVd13a#D+Z_7&Y|xU=_C-srWU*6kiZcC!$nw*)9$7 zn6CX+@=AhmkT}X@VSsa5NKe;HZuq)~1$`#h6R+ZTR#D-3j}vF!)ZOnz+5)dI4jl{{ z44Mr{P!L4~VVJN`K!!XTF*LGrKO?IK8z<8w`3e3jI8lUGNUta*C8 zn(P`s>{pjD=7Kek#B;Fw@hxAK%$F&Q6vg9J^Xf~4by_hu-=A!MJ3Znq&n~srbFGPs zH&&aMXZ>nO`|hf|ljc?VPhR!${AbO?W8x_>CU%PFA&Hm8F7cAsOREdwU~R_;ot1_u z(ruCYB-LPGn!NQdT|ZlRy+(fw^-+`=%+gee_kY4FWHg<*4sZI8+sFJD270UUORdLHO0nA4V) z%{fwsET5CQ>B?eK%uw4yQc~9?*JVo2}ze(;aRcp*ceL#HUJSllrgm5wQKR zQu+C;QrUh^8rFfA`ftFz{YAidi-`aL010qNS#tmY4c7nw4c7reD4Tcy00T@(L_t(I z5sj2vNEA^R$7gqDc6T=2^@fUA2(c`MltuL5<|KW>RWz$&YbU@|M|{$E*8Tu-Ux!w z1Y*Dr&Ubfr&v-nZaaB{3ilRumrjPmk{sZvQEWlW+{o~IH|8)=s6c#X9S5s5d%J z4@)&QH5|xQY-)^L1n0pTRu0Lx9`08YTjTwn^6 z0;b1+aQ@)n;Em$q;=7BBi)v0zj&o^g>0Whp^_^5IbxIUP8C@y9;R?*Ouu}rmfxbU= zwtWVNke-m!=`7bYEhWpcI5#)9qp`8E0lr6IQ)ARL3Ui}Af@grj8aN1=r>Cb+prlzO zNfJs*N_tUm2ZL%5* zPmL2??da$TR904gL(VDAQ-Fv_Dk}Pdw*4T(%*f4MKLRg=4ekMjhe2mW zMFsBwg%ftWT}0kxRaIk1k7qJ8*#cKB;Ft{i`zVIs-Nqge;!!Ld7#O&Qqu7e0sJmP) z$MW*>L$vSB&dxp@iA3U9fo)-7!Czlr{|o7Hv{1oyg3xsu%gn@(b1>$;SM-ZaQ`HV=V0s;lr%d8bd;xY zGwNvm3=Iu=tyXIgtJnf@A(2S@M140N ew{UA~tMxaJq;$xaSSi*30000tensorflow-0.1.0.0: TensorFlow bindings. \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/index.html b/docs/haddock/tensorflow-0.1.0.0/index.html new file mode 100644 index 0000000..d5426e3 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/index.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings.

    tensorflow-0.1.0.0: TensorFlow bindings.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html new file mode 100644 index 0000000..a4b221c --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html @@ -0,0 +1,4 @@ +TensorFlow.Build

    TensorFlow.Build

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html new file mode 100644 index 0000000..cd0e477 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html @@ -0,0 +1,4 @@ +TensorFlow.BuildOp

    TensorFlow.BuildOp

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html new file mode 100644 index 0000000..3d5fc95 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html @@ -0,0 +1,4 @@ +TensorFlow.ControlFlow

    TensorFlow.ControlFlow

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-FFI.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-FFI.html new file mode 100644 index 0000000..bd4a296 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-FFI.html @@ -0,0 +1,4 @@ +TensorFlow.Internal.FFI

    TensorFlow.Internal.FFI

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-VarInt.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-VarInt.html new file mode 100644 index 0000000..cd36a75 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Internal-VarInt.html @@ -0,0 +1,4 @@ +TensorFlow.Internal.VarInt

    TensorFlow.Internal.VarInt

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html new file mode 100644 index 0000000..6765575 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html @@ -0,0 +1,4 @@ +TensorFlow.Nodes

    TensorFlow.Nodes

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html new file mode 100644 index 0000000..22c2d31 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html @@ -0,0 +1,4 @@ +TensorFlow.Output

    TensorFlow.Output

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html new file mode 100644 index 0000000..a37ad20 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html @@ -0,0 +1,4 @@ +TensorFlow.Session

    TensorFlow.Session

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html new file mode 100644 index 0000000..cdab973 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html @@ -0,0 +1,4 @@ +TensorFlow.Tensor

    TensorFlow.Tensor

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html new file mode 100644 index 0000000..5ba106f --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html @@ -0,0 +1,4 @@ +TensorFlow.Types

    TensorFlow.Types

    class TensorType a

    data TensorData a

    data Shape

    class Attribute a

    Type constraints

    type OneOf ts a

    type family a /= b :: Constraint

    Implementation of constraints

    data TypeError a

    type family TensorTypes ts :: Constraint

    type family NoneOf ts a :: Constraint

    type family as \\ bs

    type family Delete a as

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/minus.gif b/docs/haddock/tensorflow-0.1.0.0/minus.gif new file mode 100644 index 0000000000000000000000000000000000000000..1deac2fe1a42e35b994f1b855488f392c50f6a89 GIT binary patch literal 56 zcmZ?wbhEHb * { + font-size: 93%; /* 12pt */ +} + +#mini #module-list .caption, +#mini #module-header .caption { + font-size: 125%; /* 15pt */ +} + +#mini #interface h1, +#mini #interface h2, +#mini #interface h3, +#mini #interface h4 { + font-size: 109%; /* 13pt */ + margin: 1em 0 0; +} + +#mini #interface .top, +#mini #interface .src { + margin: 0; +} + +#mini #module-list ul { + list-style: none; + margin: 0; +} + +#alphabet ul { + list-style: none; + padding: 0; + margin: 0.5em 0 0; + text-align: center; +} + +#alphabet li { + display: inline; + margin: 0 0.25em; +} + +#alphabet a { + font-weight: bold; +} + +#index .caption, +#module-list .caption { font-size: 131%; /* 17pt */ } + +#index table { + margin-left: 2em; +} + +#index .src { + font-weight: bold; +} +#index .alt { + font-size: 77%; /* 10pt */ + font-style: italic; + padding-left: 2em; +} + +#index td + td { + padding-left: 1em; +} + +#module-list ul { + list-style: none; + margin: 0 0 0 2em; +} + +#module-list li { + clear: right; +} + +#module-list span.collapser, +#module-list span.expander { + background-position: 0 0.3em; +} + +#module-list .package { + float: right; +} + +/* @end */ diff --git a/docs/haddock/tensorflow-0.1.0.0/plus.gif b/docs/haddock/tensorflow-0.1.0.0/plus.gif new file mode 100644 index 0000000000000000000000000000000000000000..2d15c14173d23f664b955cd24f51c82f5f09d91d GIT binary patch literal 59 zcmZ?wbhEHbgbBX M^XE!9f*2UA0nx1yDgXcg literal 0 HcmV?d00001 diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Build.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Build.html new file mode 100644 index 0000000..57b54be --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Build.html @@ -0,0 +1,387 @@ + + + + + +src/TensorFlow/Build.hs + + + +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE GeneralizedNewtypeDeriving #-}
    +{-# LANGUAGE LambdaCase #-}
    +{-# LANGUAGE Rank2Types #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +module TensorFlow.Build
    +    ( -- * Graph node types
    +      ControlNode(..)
    +    , Unique
    +    -- * Ops
    +    , explicitName
    +    , implicitName
    +    , opDef
    +    , opDefWithName
    +    , opName
    +    , opType
    +    , opAttr
    +    , opInputs
    +    , opControlInputs
    +    -- * The Build monad
    +    , GraphState
    +    , render
    +    , renderNodeName
    +    , renderedNodeDefs
    +    , BuildT
    +    , Build
    +    , addInitializer
    +    , hoistBuildT
    +    , evalBuildT
    +    , runBuildT
    +    , asGraphDef
    +    , addGraphDef
    +    , flushInitializers
    +    , flushNodeBuffer
    +    -- * Creating and looking up Ops
    +    , getOrAddOp
    +    , addNewOp
    +    , renderOutput
    +    -- * Modifying all nodes in a Build action
    +    , colocateWith
    +    , withStateLens
    +    , withDevice
    +    , withNameScope
    +    , withNodeDependencies
    +    -- * Internal Summary related bits.
    +    , addSummary
    +    , SummaryTensor
    +    , collectAllSummaries
    +    ) where
    +
    +import Control.Monad.IO.Class (MonadIO(..))
    +import Control.Monad.Trans.Class (MonadTrans(..))
    +import Control.Monad.Trans.State.Strict(StateT(..), mapStateT, evalStateT)
    +import Data.ByteString (ByteString)
    +import Data.Default (def)
    +import Data.Functor.Identity (Identity(..))
    +import qualified Data.Map.Strict as Map
    +import Data.Monoid ((<>))
    +import qualified Data.Set as Set
    +import Data.Set (Set)
    +import Data.String (IsString(..))
    +import Data.Text (Text)
    +import qualified Data.Text as Text
    +import Lens.Family2 (Lens', (.~), (^.), (&))
    +import Lens.Family2.State.Strict (MonadState, use, uses, (.=), (<>=), (%=))
    +import Lens.Family2.Unchecked (lens)
    +import Proto.Tensorflow.Core.Framework.Graph
    +    ( GraphDef
    +    , node
    +    )
    +import Proto.Tensorflow.Core.Framework.NodeDef
    +    ( NodeDef
    +    , attr
    +    , input
    +    , device
    +    , name
    +    , op
    +    )
    +
    +import TensorFlow.Orphans ()
    +import TensorFlow.Output
    +import TensorFlow.Tensor
    +
    +newtype Unique = Unique Int
    +    deriving (Eq, Ord, Enum)
    +
    +--------------
    +
    +implicitName :: PendingNodeName
    +implicitName = ImplicitName
    +
    +explicitName :: Text -> PendingNodeName
    +explicitName = ExplicitName
    +
    +newtype Scope = Scope {unScope :: Text}
    +    deriving (Eq, Ord, IsString)
    +
    +instance Show Scope where
    +    show = show . unScope
    +
    +opDef :: OpType -> OpDef
    +opDef = opDefWithName ImplicitName
    +
    +opDefWithName :: PendingNodeName -> OpType -> OpDef
    +opDefWithName n t = OpDef
    +    { _opName = n
    +    , _opType = t
    +    , _opAttrs = Map.empty
    +    , _opInputs = []
    +    , _opControlInputs = []
    +    }
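+
+-- A usage sketch of assembling an 'OpDef' by hand with the lenses above
+-- ("Add" and its "T" attribute are assumptions about the TensorFlow op
+-- registry, and 'tensorType' comes from TensorFlow.Types):
+--
+-- > addDef :: OpDef
+-- > addDef = opDef "Add" & opAttr "T" .~ tensorType (undefined :: Float)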
    +
    +-- | Synonym for the tensors that return serialized Summary proto.
    +type SummaryTensor = Tensor Value ByteString
    +
    +data GraphState = GraphState
    +    { _renderedNodes :: !(Map.Map PendingNode NodeDef)
    +        -- ^ Nodes which have been rendered.  Keeps track of the unique ID we
    +        -- assign each implicitly-named node.  Also prevents us from adding the
    +        -- same node (implicit or explicit) more than once to the nodeBuffer.
    +    , _renderedNodeDefs :: !(Map.Map NodeName NodeDef)
    +        -- ^ The NodeDefs of nodes which have been rendered. Used by the
    +        -- Gradient module to inspect the node graph.
    +    , _nodeBuffer :: [NodeDef]
    +        -- ^ A list of nodes that should be passed to TensorFlow during
    +        -- the next call to Session.extend (TF_ExtendGraph).
    +    , _nextUnique :: !Unique
    +        -- ^ Unique ID for the next node
    +    -- TODO(judahjacobson): watch for clashes between auto and user names.
    +    , _defaultDevice :: !(Maybe Device)
    +    , _currentScope :: [Scope]
    +    , _defaultControlInputs :: !(Set NodeName)
    +    , _initializationNodes  :: [NodeName]
    +      -- ^ The nodes to run next time a TF.run is issued, typically
    +      -- variable initializers.
    +    , _summaries :: [SummaryTensor]
+      -- ^ The tensors for summaries.
    +    }
    +
    +-- | A node definition without its final name.  Used as a key in the
    +-- "renderedNodes" map.
    +-- The NodeDef contained inside has an empty "name" field.
    +data PendingNode = PendingNode [Scope] !PendingNodeName !NodeDef
    +    deriving (Eq, Ord)
    +
    +-- Returns an _incomplete_ NodeDef. The name is fixed by addNewOpFromPending.
    +pendingNodeDef :: PendingNode -> NodeDef
    +pendingNodeDef (PendingNode _ _ n) = n
    +
    +initGraphState :: GraphState
    +initGraphState =
    +    GraphState Map.empty Map.empty [] (Unique 0) Nothing [] Set.empty [] []
    +
    +renderedNodes :: Lens' GraphState (Map.Map PendingNode NodeDef)
    +renderedNodes = lens _renderedNodes (\g x -> g { _renderedNodes = x })
    +
    +renderedNodeDefs :: Lens' GraphState (Map.Map NodeName NodeDef)
    +renderedNodeDefs = lens _renderedNodeDefs (\g x -> g { _renderedNodeDefs = x })
    +
    +nodeBuffer :: Lens' GraphState [NodeDef]
    +nodeBuffer = lens _nodeBuffer (\g x -> g { _nodeBuffer = x })
    +
    +nextUnique :: Lens' GraphState Unique
    +nextUnique = lens _nextUnique (\g x -> g { _nextUnique = x })
    +
    +defaultDevice :: Lens' GraphState (Maybe Device)
    +defaultDevice = lens _defaultDevice (\g x -> g { _defaultDevice = x })
    +
    +currentScope :: Lens' GraphState [Scope]
    +currentScope = lens _currentScope (\g x -> g { _currentScope = x })
    +
    +defaultControlInputs :: Lens' GraphState (Set NodeName)
    +defaultControlInputs = lens _defaultControlInputs
    +                          (\g x -> g { _defaultControlInputs = x })
    +
    +initializationNodes :: Lens' GraphState [NodeName]
    +initializationNodes = lens _initializationNodes (\g x -> g { _initializationNodes = x })
    +
    +summaries :: Lens' GraphState [SummaryTensor]
    +summaries = lens _summaries (\g x -> g { _summaries = x })
    +
    +-- | An action for building nodes in a TensorFlow graph.
    +-- Used to manage build state internally as part of the @Session@ monad.
    +newtype BuildT m a = BuildT (StateT GraphState m a)
    +    deriving (Functor, Applicative, Monad, MonadIO, MonadTrans,
    +              MonadState GraphState)
    +
    +-- | An action for building nodes in a TensorFlow graph.
    +type Build = BuildT Identity
    +
    +-- | This is Control.Monad.Morph.hoist sans the dependency.
    +hoistBuildT :: (forall a . m a -> n a) -> BuildT m b -> BuildT n b
    +hoistBuildT f (BuildT m) = BuildT $ mapStateT f m
    +
    +runBuildT :: BuildT m a -> m (a, GraphState)
    +runBuildT (BuildT f) = runStateT f initGraphState
    +
    +evalBuildT :: Monad m => BuildT m a -> m a
    +evalBuildT (BuildT f) = evalStateT f initGraphState
    +
    +-- | Get all the NodeDefs that have accumulated so far, and clear that buffer.
    +flushNodeBuffer :: Monad m => BuildT m [NodeDef]
    +flushNodeBuffer = do
    +    ns <- use nodeBuffer
    +    nodeBuffer .= []
    +    return ns
    +
    +-- | Get all the initializers that have accumulated so far, and clear
    +-- that buffer.
    +flushInitializers :: Monad m => BuildT m [NodeName]
    +flushInitializers = do
    +    ns <- use initializationNodes
    +    initializationNodes .= []
    +    return ns
    +
    +-- | Registers the given node to be executed before the next
    +-- 'TensorFlow.Session.run'.
    +addInitializer :: ControlNode -> Build ()
    +addInitializer (ControlNode o) = do
    +    i <- getOrAddOp o
    +    initializationNodes %= (i:)
    +
    +-- | Produce a GraphDef proto representation of the nodes that are rendered in
    +-- the given 'Build' action.
    +asGraphDef :: Build a -> GraphDef
    +asGraphDef b = def & node .~ gs ^. nodeBuffer
    +  where
    +    gs = snd $ runIdentity $ runBuildT b
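+
+-- A sketch of inspecting the graph built so far, assuming 't' is a
+-- 'Tensor' constructed elsewhere:
+--
+-- > graph :: GraphDef
+-- > graph = asGraphDef (render t)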
    +
    +-- TODO: check against existing nodes for conflicts?
    +addGraphDef :: GraphDef -> Build ()
    +addGraphDef g = nodeBuffer <>= g ^. node
    +
    +-- | Render the given op if it hasn't been rendered already, and return its
    +-- name.
    +getOrAddOp :: Op -> Build NodeName
    +getOrAddOp o = NodeName . (^. name) <$> resolveOp o
    +
    +resolveOp :: Op -> Build NodeDef
    +resolveOp (Rendered n) = return n
    +resolveOp (Unrendered o) = do
    +    pending <- getPendingNode o
    +    uses renderedNodes (Map.lookup pending) >>= \case
    +        Just n -> return n
    +        Nothing -> addNewOpFromPending pending
    +
    +-- | Add a new node for a given 'OpDef'.  This is used for making "stateful" ops
    +-- which are not safe to dedup (e.g, "variable" and "assign").
    +addNewOp :: OpDef -> Build NodeDef
    +addNewOp o = getPendingNode o >>= addNewOpFromPending
    +
    +addNewOpFromPending :: PendingNode -> Build NodeDef
    +addNewOpFromPending pending = do
    +    nodeName <- renderPendingNode pending
    +    let nodeDef = pendingNodeDef pending & name .~ unNodeName nodeName
    +    nodeBuffer %= (nodeDef :)
    +    renderedNodes %= Map.insert pending nodeDef
    +    renderedNodeDefs %= Map.insert nodeName nodeDef
    +    return nodeDef
    +
    +-- | Get the pending node corresponding to an OpDef, which may or may not have
    +-- been rendered before.  Implicitly renders all of this node's inputs.
    +getPendingNode :: OpDef -> Build PendingNode
    +getPendingNode o = do
    +    -- An empty string in the proto field means that no specific
    +    -- device is specified.
    +    dev <- maybe "" deviceName <$> use defaultDevice
    +    inputs <- mapM getInput (o ^. opInputs)
    +    scope <- use currentScope
    +    controls <- use defaultControlInputs
    +    let controlInputs
    +            = map getDep (o ^. opControlInputs ++ Set.toList controls)
    +    return $ PendingNode scope (o ^. opName)
    +            $ def & op .~ (unOpType (o ^. opType) :: Text)
    +                  & attr .~ _opAttrs o
    +                  & input .~ (inputs ++ controlInputs)
    +                  & device .~ dev
    +  where
    +    getInput (Output (OutputIx k) subOp)
    +        = (<> ":" <> Text.pack (show k)) . unNodeName <$> getOrAddOp subOp
    +    getDep = ("^" <>) . unNodeName
    +
    +-- | Pick a name for a pending node.  If it has an explicit name, just use that;
    +-- if the name is implicit, assign a new unique name based on the op type.
    +renderPendingNode :: PendingNode -> Build NodeName
    +renderPendingNode (PendingNode scope pendingName nodeDef)
    +    = NodeName . (scopePrefix <>) <$> getName
    +  where
    +    scopePrefix = Text.concat $ fmap ((<> "/") . unScope) scope
    +    getName = case pendingName of
    +        ExplicitName n -> return n
    +        ImplicitName -> do
    +            u@(Unique k) <- use nextUnique
    +            nextUnique .= succ u
    +            return $ nodeDef ^. op <> "_" <> Text.pack (show k)
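+
+-- For example, an implicitly named "Add" node rendered while the unique
+-- counter is at 3 becomes "Add_3", prefixed by any active name scopes.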
    +
    +
    +-- | Render an 'Output' and return a string representation for the TensorFlow
    +-- foreign APIs.
    +renderOutput :: Output -> Build Text
    +renderOutput (Output (OutputIx i) o) = do
    +    n <- getOrAddOp o
    +    return $ unNodeName n <> Text.pack (":" ++ show i)
    +
    +-- | Modify some part of the state, run an action, and restore the state
    +-- after that action is done.
    +withStateLens :: MonadState s m => Lens' s a -> (a -> a) -> m b -> m b
    +withStateLens accessor f act = do
    +    old <- use accessor
    +    accessor %= f
    +    result <- act
    +    accessor .= old
    +    return result
    +
    +-- | Set a device for all nodes rendered in the given 'Build' action
    +-- (unless further overridden by another use of withDevice).
    +withDevice :: Maybe Device -> Build a -> Build a
    +withDevice d = withStateLens defaultDevice (const d)
    +
    +-- | Places all nodes rendered in the given 'Build' action on the same
+-- device as the given Tensor (see also 'withDevice'). Make sure that
+-- the action has the side effect of rendering the desired tensors; a
+-- pure return alone would not have the desired effect.
    +colocateWith :: forall a v b . Tensor v b -> Build a -> Build a
    +colocateWith t x = do
    +    d <- Device . (^. device) <$> resolveOp (t ^. tensorOutput . outputOp)
    +    withDevice (Just d) x
    +
    +-- | Prepend a scope to all nodes rendered in the given 'Build' action.
    +withNameScope :: Text -> Build a -> Build a
    +withNameScope s = withStateLens currentScope (Scope s :)
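+
+-- A sketch: nodes rendered inside pick up the scope as a name prefix,
+-- e.g. "layer1/Add_0" for an implicitly named "Add" node:
+--
+-- > scoped = withNameScope "layer1" (render t)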
    +
    +-- | Add control inputs to all nodes rendered in the given 'Build' action.
    +withNodeDependencies :: Set NodeName -> Build a -> Build a
    +withNodeDependencies nodes = withStateLens defaultControlInputs (<> nodes)
    +
    +-- | Render a 'Tensor', fixing its name, scope, device and control inputs from
    +-- the 'Build' context.  Also renders any dependencies of the 'Tensor' that
    +-- weren't already rendered.
    +--
    +-- This operation is idempotent; @render >=> render === render@.  However,
    +-- rendering a (previously un-rendered) 'Tensor' in two different contexts
    +-- may result in two different 'Tensor's.
    +render :: Tensor v a -> Build (Tensor v a)
    +render = tensorOutput $ outputOp $ fmap Rendered . resolveOp
    +
    +-- | Render a 'Tensor' and get its node's name.
    +renderNodeName :: Tensor v a -> Build NodeName
    +renderNodeName t = getOrAddOp (t ^. tensorOutput . outputOp)
    +
    +-- | Records the given summary action in Build for retrieval with
    +-- 'collectAllSummaries'. The summary op is required to produce a
    +-- Summary protocol buffer in string form. For safety, use the
    +-- pre-composed functions: Logging.scalarSummary and
    +-- Logging.histogramSummary.
    +addSummary :: SummaryTensor -> Build ()
    +addSummary t = summaries %= (t :)
    +
    +-- | Retrieves the summary ops collected thus far. Typically this only
    +-- happens once, but if 'TensorFlow.Session.buildWithSummary' is used
    +-- repeatedly, the values accumulate.
    +collectAllSummaries :: Monad m => BuildT m [SummaryTensor]
    +collectAllSummaries = use summaries
    +
    + diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-BuildOp.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-BuildOp.html new file mode 100644 index 0000000..ff74848 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-BuildOp.html @@ -0,0 +1,210 @@ + + + + + +src/TensorFlow/BuildOp.hs + + + +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE TupleSections #-}
    +
    +module TensorFlow.BuildOp
    +    ( OpResult
    +    , BuildOp
    +    , buildOp
    +    , buildListOp
    +    , eqLengthGuard
    +    )
    +  where
    +
    +import Control.Monad (replicateM)
    +import Control.Monad.Reader (ReaderT, runReaderT, ask)
    +import Control.Monad.State.Strict (State, runState, get, put)
    +import Data.Int (Int64)
    +import Lens.Family2 ((&), (<>~), (^.))
    +
    +import TensorFlow.Build
    +import TensorFlow.Output
    +import TensorFlow.Tensor
    +
    +data ResultState = ResultState !OutputIx [Int64] deriving Show
    +
    +type Result = ReaderT Op (State ResultState)
    +
    +-- | Class of types that can be used as op outputs.
    +class OpResult a where
    +    toResult :: Result a
    +
    +instance (OpResult a1, OpResult a2) => OpResult (a1, a2) where
    +    toResult = (,) <$> toResult <*> toResult
    +
    +instance (OpResult a1, OpResult a2, OpResult a3) => OpResult (a1, a2, a3) where
    +    toResult = (,,) <$> toResult <*> toResult <*> toResult
    +
    +instance (OpResult a1, OpResult a2, OpResult a3, OpResult a4)
    +         => OpResult (a1, a2, a3, a4) where
    +    toResult = (,,,) <$> toResult <*> toResult <*> toResult <*> toResult
    +
    +instance (OpResult a1, OpResult a2, OpResult a3, OpResult a4, OpResult a5)
    +         => OpResult (a1, a2, a3, a4, a5) where
    +    toResult = (,,,,) <$> toResult
    +                      <*> toResult
    +                      <*> toResult
    +                      <*> toResult
    +                      <*> toResult
    +
    +instance ( OpResult a1
    +         , OpResult a2
    +         , OpResult a3
    +         , OpResult a4
    +         , OpResult a5
    +         , OpResult a6
    +         )
    +         => OpResult (a1, a2, a3, a4, a5, a6) where
    +    toResult = (,,,,,)
    +               <$> toResult
    +               <*> toResult
    +               <*> toResult
    +               <*> toResult
    +               <*> toResult
    +               <*> toResult
    +
    +tensorResult :: TensorKind v -> Result (Tensor v a)
    +tensorResult v = do
    +    o <- ask
    +    ResultState i ns <- get
    +    put $! ResultState (i+1) ns
    +    return $! Tensor v $ output i o
    +
    +instance OpResult (Tensor Value a) where
    +    toResult = tensorResult ValueKind
    +
    +instance OpResult (Tensor Ref a) where
    +    toResult = tensorResult RefKind
    +
    +instance OpResult ControlNode where
    +    toResult = ControlNode <$> ask
    +
    +instance OpResult a => OpResult [a] where
    +    toResult = do
    +        ResultState i ns <- get
    +        case ns of
    +            [] -> error $ "Ran out of counts in toResult. " ++
    +                          "Likely misuse of buildListOp."
    +            (n : rest) -> do
    +                put $! ResultState i rest
    +                replicateM (fromIntegral n) toResult
    +
    +runResult :: OpResult a => [Int64] -> Op -> a
    +runResult ns o =
    +    case runState (runReaderT toResult o) (ResultState 0 ns) of
    +        (x, ResultState _ []) -> x
+        (_, ns') -> error $ "Unused lengths in runResult attributes: " ++
    +                            show (ns, ns')
    +
    +-- | Make a new "pure" op, which may be deduped with identical ops within
    +-- the same scope.
    +pureResult :: OpResult a => [Int64] -> OpDef -> [Output] -> a
    +pureResult ns o ts = runResult ns $ Unrendered $ addReversedInputs o ts
    +
    +-- | Make a new "stateful" op, which will not be deduped with otherwise
    +-- identical ops.
    +buildResult :: OpResult a => [Int64] -> OpDef -> [Output] -> Build a
    +buildResult ns o ts
    +    = runResult ns . Rendered <$> addNewOp (addReversedInputs o ts)
    +
    +addReversedInputs :: OpDef -> [Output] -> OpDef
    +addReversedInputs o ts = o & opInputs <>~ reverse ts
    +
    +-- | Class of types that can be used as op functions.
    +class BuildOp f where
    +    buildOp' :: [Int64]  -- ^ Sizes of list results (having number_attr)
    +             -> OpDef
    +             -> [Output] -- ^ Accumulator for inputs to the op.
    +             -> f
    +
    +-- | Starts an operation that returns a structured set of tensors
    +-- (singletons or tuples).
    +buildOp :: BuildOp f => OpDef -> f
    +buildOp o = buildOp' [] o []
    +
    +-- | Starts an operation that returns a list of tensors.
    +buildListOp :: BuildOp f => [Int64]
+               -- ^ Cardinalities of the corresponding output lists of tensors.
    +               -> OpDef -> f
    +buildListOp counts o = buildOp' counts o []
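+
+-- A sketch of how a generated wrapper might use 'buildOp' ("Neg" and its
+-- "T" attribute are assumptions about the TensorFlow op registry):
+--
+-- > neg :: forall v t . TensorType t => Tensor v t -> Tensor Value t
+-- > neg = buildOp (opDef "Neg" & opAttr "T" .~ tensorType (undefined :: t))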
    +
    +instance BuildOp ControlNode where
    +    buildOp' _ o ts = ControlNode $ Unrendered $ addReversedInputs o ts
    +
    +instance BuildOp (Tensor Value a) where
    +    buildOp' = pureResult
    +
    +instance BuildOp (Tensor Ref a) where
    +    buildOp' = pureResult
    +
    +instance BuildOp [Tensor Value a] where
    +    buildOp' = pureResult
    +
    +instance (OpResult t1, OpResult t2) => BuildOp (t1, t2) where
    +    buildOp' = pureResult
    +
    +instance (OpResult t1, OpResult t2, OpResult t3) => BuildOp (t1, t2, t3) where
    +    buildOp' = pureResult
    +
    +instance (OpResult t1, OpResult t2, OpResult t3, OpResult t4)
    +         => BuildOp (t1, t2, t3, t4) where
    +    buildOp' = pureResult
    +
    +instance (OpResult t1, OpResult t2, OpResult t3, OpResult t4, OpResult t5)
    +         => BuildOp (t1, t2, t3, t4, t5) where
    +    buildOp' = pureResult
    +
    +instance ( OpResult t1
    +         , OpResult t2
    +         , OpResult t3
    +         , OpResult t4
    +         , OpResult t5
    +         , OpResult t6
    +         )
    +         => BuildOp (t1, t2, t3, t4, t5, t6) where
    +    buildOp' = pureResult
    +
    +instance OpResult a => BuildOp (Build a) where
    +    buildOp' = buildResult
    +
    +instance BuildOp f => BuildOp (Tensor v a -> f) where
    +    buildOp' rf o ts t = buildOp' rf o (t ^. tensorOutput : ts)
    +
    +instance BuildOp f => BuildOp ([Tensor v a] -> f) where
    +    buildOp' rf o accum ts
    +        = buildOp' rf o (reverse (fmap (^. tensorOutput) ts) ++ accum)
    +
    +-- | Returns true if all the integers in each tuple are identical.
    +-- Throws an error with a descriptive message if not.
    +eqLengthGuard :: [(String, [(String, Int)])] -> Bool
    +eqLengthGuard = all eachOk
    +  where
    +    eachOk (_, []) = True
    +    -- The next line has (== 1) . length . nub in disguise
+    eachOk (numberAttrName, pairs@((_, x) : zs)) = all (\z -> snd z == x) zs ||
+        error ("number_attr " ++ numberAttrName ++
+               " contains tensors with different lengths " ++ show pairs)
    +
    + diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-ControlFlow.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-ControlFlow.html new file mode 100644 index 0000000..091a8cf --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-ControlFlow.html @@ -0,0 +1,98 @@ + + + + + +src/TensorFlow/ControlFlow.hs + + + +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE GADTs #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE RankNTypes #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +
    +module TensorFlow.ControlFlow
    +    ( -- * Dependencies
    +      withControlDependencies
    +    , group
    +      -- * Operations
    +    , identity
    +    , noOp
    +    , named
    +    ) where
    +
    +import qualified Data.Set as Set
    +import Data.Text (Text)
    +import Lens.Family2 ((&), (^.), (.~))
    +
    +import TensorFlow.BuildOp
    +import TensorFlow.Build
    +import TensorFlow.Nodes
    +import TensorFlow.Output
    +import TensorFlow.Tensor
    +import TensorFlow.Types
    +
    +-- | Modify a 'Build' action, such that all new ops rendered in it will depend
    +-- on the nodes in the first argument.
    +withControlDependencies :: Nodes t => t -> Build a -> Build a
    +withControlDependencies deps act = do
    +    nodes <- getNodes deps
    +    withNodeDependencies nodes act
    +
    +-- TODO(judahjacobson): Reimplement withDependencies.
    +
    +-- | Create an op that groups multiple operations.
    +--
+-- When this op finishes, all ops in the input @t@ have finished.  This op has
    +-- no output.
    +group :: Nodes t => t -> Build ControlNode
    +group deps = do
    +    nodes <- Set.toList <$> getNodes deps
    +    -- TODO: slicker way
    +    return $ buildOp $ opDef "NoOp" & opControlInputs .~ nodes
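+
+-- A sketch, assuming rendered tensors 'a' and 'b' and a tuple instance
+-- of 'Nodes':
+--
+-- > done <- group (a, b)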
    +
    +
    +-- | Returns a 'Tensor' with the same shape and contents as the input.
    +identity :: TensorType a => Tensor v a -> Tensor v a
    +identity = namedIdentity implicitName
    +
    +-- | Returns a 'Tensor' with a given name and the same shape and contents as
    +-- the input.
    +--
+-- TODO(judahjacobson): This breaks when used with uninitialized @Tensor Ref@s,
    +-- since @RefIdentity@ doesn't have SetAllowsUninitializedInput().  Look into
    +-- whether we can change that op.
    +named :: TensorType a => Text -> Tensor v a -> Tensor v a
    +named = namedIdentity . explicitName
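+
+-- A sketch: pin a stable graph name on a tensor so it can be recovered
+-- later (e.g. with 'tensorFromName'):
+--
+-- > logits' = named "logits" logits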
    +
    +-- | An internal version of "identity" that allows setting the name
    +-- of the output Tensor.
    +namedIdentity :: forall a v . TensorType a
    +              => PendingNodeName -> Tensor v a -> Tensor v a
    +namedIdentity n t = case t ^. tensorKind of
    +                      ValueKind -> buildOp (opDefWithName n "Identity" & setTypeAttr) t
    +                      RefKind -> buildOp (opDefWithName n "RefIdentity" & setTypeAttr) t
    +  where
    +    setTypeAttr = opAttr "T" .~ tensorType (undefined :: a)
    +
    +
    +-- | Does nothing.  Only useful as a placeholder for control edges.
    +noOp :: ControlNode
    +noOp = buildOp $ opDef "NoOp"
    +
    + diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-FFI.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-FFI.html new file mode 100644 index 0000000..9ed15ea --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-FFI.html @@ -0,0 +1,254 @@ + + + + + +src/TensorFlow/Internal/FFI.hs + + + +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE DeriveDataTypeable #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +
    +module TensorFlow.Internal.FFI
    +    ( TensorFlowException(..)
    +    , Raw.Session
    +    , withSession
    +    , extendGraph
    +    , run
    +    , TensorData(..)
    +    , setSessionConfig
    +    , setSessionTarget
    +    , getAllOpList
    +      -- * Internal helper.
    +    , useProtoAsVoidPtrLen
    +    )
    +    where
    +
    +import Control.Concurrent.Async (Async, async, cancel, waitCatch)
    +import Control.Concurrent.MVar (MVar, modifyMVarMasked_, newMVar, takeMVar)
    +import Control.Exception (Exception, throwIO, bracket, finally, mask_)
    +import Control.Monad (when)
    +import Data.Int (Int64)
    +import Data.Typeable (Typeable)
    +import Data.Word (Word8)
    +import Foreign (Ptr, FunPtr, nullPtr, castPtr)
    +import Foreign.C.String (CString)
    +import Foreign.ForeignPtr (newForeignPtr, withForeignPtr)
    +import Foreign.Marshal.Alloc (free)
    +import Foreign.Marshal.Array (withArrayLen, peekArray, mallocArray, copyArray)
    +import System.IO.Unsafe (unsafePerformIO)
    +import qualified Data.ByteString as B
    +import qualified Data.Text as T
    +import qualified Data.Text.Encoding as T
    +import qualified Data.Text.Encoding.Error as T
    +import qualified Data.Vector.Storable as S
    +
    +import Data.ProtoLens (Message, encodeMessage)
    +import Proto.Tensorflow.Core.Framework.Graph (GraphDef)
    +import Proto.Tensorflow.Core.Framework.Types (DataType(..))
    +import Proto.Tensorflow.Core.Protobuf.Config (ConfigProto)
    +
    +import qualified TensorFlow.Internal.Raw as Raw
    +
    +data TensorFlowException = TensorFlowException Raw.Code T.Text
    +    deriving (Show, Eq, Typeable)
    +
    +instance Exception TensorFlowException
    +
    +-- | All of the data needed to represent a tensor.
    +data TensorData = TensorData
    +    { tensorDataDimensions :: [Int64]
    +    , tensorDataType       :: !DataType
    +    , tensorDataBytes      :: !(S.Vector Word8)
    +    }
    +  deriving (Show, Eq)
    +
    +-- | Runs the given action after creating a session with options
    +-- populated by the given optionSetter.
    +withSession :: (Raw.SessionOptions -> IO ())
    +            -> ((IO () -> IO ()) -> Raw.Session -> IO a)
    +            -- ^ The action can spawn concurrent tasks which will
    +            -- be canceled before withSession returns.
    +            -> IO a
    +withSession optionSetter action = do
    +    drain <- newMVar []
    +    let cleanup s =
    +        -- Closes the session to nudge the pending run calls to fail and exit.
    +            finally (checkStatus (Raw.closeSession s)) $ do
    +                runners <- takeMVar drain
    +                -- Collects all runners before deleting the session.
    +                mapM_ shutDownRunner runners
    +                checkStatus (Raw.deleteSession s)
    +    bracket Raw.newSessionOptions Raw.deleteSessionOptions $ \options -> do
    +        optionSetter options
    +        bracket
    +            (checkStatus (Raw.newSession options))
    +            cleanup
    +            (action (asyncCollector drain))
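+
+-- A sketch of a call site; the target string is an assumption about what
+-- the underlying C API accepts:
+--
+-- > withSession (setSessionTarget "localhost:2222") $ \_async session ->
+-- >     extendGraph session someGraphDef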
    +
    +asyncCollector :: MVar [Async ()] -> IO () -> IO ()
    +asyncCollector drain runner = modifyMVarMasked_ drain launchAndRecord
    +    where
    +      launchAndRecord restRunners = (: restRunners) <$> async runner
    +
    +shutDownRunner :: Async () -> IO ()
    +shutDownRunner r = do
    +    cancel r
    +    -- TODO(gnezdo): manage exceptions better than print.
    +    either print (const (return ())) =<< waitCatch r
    +
    +extendGraph :: Raw.Session -> GraphDef -> IO ()
    +extendGraph session pb =
    +    useProtoAsVoidPtrLen pb $ \ptr len ->
    +        checkStatus $ Raw.extendGraph session ptr len
    +
    +
    +run :: Raw.Session
    +    -> [(B.ByteString, TensorData)] -- ^ Feeds.
    +    -> [B.ByteString]               -- ^ Fetches.
    +    -> [B.ByteString]               -- ^ Targets.
    +    -> IO [TensorData]
    +run session feeds fetches targets = do
    +    let nullTensor = Raw.Tensor nullPtr
    +    -- Use mask to avoid leaking input tensors before they are passed to 'run'
    +    -- and output tensors before they are passed to 'createTensorData'.
    +    mask_ $
    +        -- Feeds
    +        withStringArrayLen (fst <$> feeds) $ \feedsLen feedNames ->
    +        mapM (createRawTensor . snd) feeds >>= \feedTensors ->
    +        withArrayLen feedTensors $ \_ cFeedTensors ->
    +        -- Fetches.
    +        withStringArrayLen fetches $ \fetchesLen fetchNames ->
    +        -- tensorOuts is an array of null Tensor pointers that will be filled
    +        -- by the call to Raw.run.
    +        withArrayLen (replicate fetchesLen nullTensor) $ \_ tensorOuts ->
    +        -- Targets.
    +        withStringArrayLen targets $ \targetsLen ctargets -> do
    +            checkStatus $ Raw.run
    +                session
    +                nullPtr
    +                feedNames cFeedTensors (fromIntegral feedsLen)
    +                fetchNames tensorOuts (fromIntegral fetchesLen)
    +                ctargets (fromIntegral targetsLen)
    +                nullPtr
    +            outTensors <- peekArray fetchesLen tensorOuts
    +            mapM createTensorData outTensors
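+
+-- A sketch of a call; the "node:index" feed and fetch names are
+-- assumptions following the form produced by TensorFlow.Build.renderOutput:
+--
+-- > outs <- run session [("x:0", xData)] ["y:0"] []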
    +
    +
    +-- Internal.
    +
    +
    +-- | Use a list of ByteString as a list of CString.
    +withStringList :: [B.ByteString] -> ([CString] -> IO a) -> IO a
    +withStringList strings fn = go strings []
    +  where
    +    go [] cs = fn (reverse cs)
    +    -- TODO(fmayle): Is it worth using unsafeAsCString here?
    +    go (x:xs) cs = B.useAsCString x $ \c -> go xs (c:cs)
    +
    +
    +-- | Use a list of ByteString as an array of CString.
    +withStringArrayLen :: [B.ByteString] -> (Int -> Ptr CString -> IO a) -> IO a
    +withStringArrayLen xs fn = withStringList xs (`withArrayLen` fn)
    +
    +
    +-- | Create a Raw.Tensor from a TensorData.
    +createRawTensor :: TensorData -> IO Raw.Tensor
    +createRawTensor (TensorData dims dt byteVec) =
    +    withArrayLen (map fromIntegral dims) $ \cdimsLen cdims -> do
    +        let len = S.length byteVec
    +        dest <- mallocArray len
    +        S.unsafeWith byteVec $ \x -> copyArray dest x len
    +        Raw.newTensor (toEnum $ fromEnum dt)
    +                      cdims (fromIntegral cdimsLen)
    +                      (castPtr dest) (fromIntegral len)
    +                      tensorDeallocFunPtr nullPtr
    +
    +{-# NOINLINE tensorDeallocFunPtr #-}
    +tensorDeallocFunPtr :: FunPtr Raw.TensorDeallocFn
    +tensorDeallocFunPtr = unsafePerformIO $ Raw.wrapTensorDealloc $ \x _ _ -> free x
    +
    +-- | Create a TensorData from a Raw.Tensor.
    +--
    +-- Takes ownership of the Raw.Tensor.
    +createTensorData :: Raw.Tensor -> IO TensorData
    +createTensorData t = do
    +    -- Read dimensions.
    +    numDims <- Raw.numDims t
    +    dims <- mapM (Raw.dim t) [0..numDims-1]
    +    -- Read type.
    +    dtype <- toEnum . fromEnum <$> Raw.tensorType t
    +    -- Read data.
    +    len <- fromIntegral <$> Raw.tensorByteSize t
    +    bytes <- castPtr <$> Raw.tensorData t :: IO (Ptr Word8)
    +    -- TODO(fmayle): Don't copy the data.
    +    v <- S.fromList <$> peekArray len bytes
    +    -- Free tensor.
    +    Raw.deleteTensor t
    +    return $ TensorData (map fromIntegral dims) dtype v
    +
    +-- | Runs the given action which does FFI calls updating a provided
    +-- status object. If the status is not OK it is thrown as
    +-- TensorFlowException.
    +checkStatus :: (Raw.Status -> IO a) -> IO a
    +checkStatus fn =
    +    bracket Raw.newStatus Raw.deleteStatus $ \status -> do
    +        result <- fn status
    +        code <- Raw.getCode status
    +        when (code /= Raw.TF_OK) $ do
    +            msg <- T.decodeUtf8With T.lenientDecode <$>
    +                   (Raw.message status >>= B.packCString)
    +            throwIO $ TensorFlowException code msg
    +        return result
    +
    +setSessionConfig :: ConfigProto -> Raw.SessionOptions -> IO ()
    +setSessionConfig pb opt =
    +    useProtoAsVoidPtrLen pb $ \ptr len ->
    +        checkStatus (Raw.setConfig opt ptr len)
    +
    +setSessionTarget :: B.ByteString -> Raw.SessionOptions -> IO ()
    +setSessionTarget target = B.useAsCString target . Raw.setTarget
    +
    +-- | Serializes the given msg and provides it as (ptr,len) argument
    +-- to the given action.
    +useProtoAsVoidPtrLen :: (Message msg, Num c) =>
    +                        msg -> (Ptr b -> c -> IO a) -> IO a
    +useProtoAsVoidPtrLen msg f = B.useAsCStringLen (encodeMessage msg) $
    +        \(bytes, len) -> f (castPtr bytes) (fromIntegral len)
    +
    +-- | Returns the serialized OpList of all OpDefs defined in this
    +-- address space.
    +getAllOpList :: IO B.ByteString
    +getAllOpList = do
    +    foreignPtr <-
    +        mask_ (newForeignPtr Raw.deleteBuffer =<< checkCall)
    +    -- Makes a copy because it is more reliable than eviscerating
    +    -- Buffer to steal its memory (including custom deallocator).
    +    withForeignPtr foreignPtr $
    +        \ptr -> B.packCStringLen =<< (,)
    +                <$> (castPtr <$> Raw.getBufferData ptr)
    +                <*> (fromIntegral <$> Raw.getBufferLength ptr)
    +    where
    +      checkCall = do
    +          p <- Raw.getAllOpList
    +          when (p == nullPtr) (throwIO exception)
    +          return p
    +      exception = TensorFlowException
    +                Raw.TF_UNKNOWN "GetAllOpList failure, check logs"
    +
    + diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-Raw.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-Raw.html new file mode 100644 index 0000000..e72ddd7 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-Raw.html @@ -0,0 +1,514 @@ + + + + + +.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/TensorFlow/Internal/Raw.hs + + + +
    -- GENERATED by C->Haskell Compiler, version 0.28.1 Switcheroo, 1 April 2016 (Haskell)
+-- Edit the ORIGINAL .chs file instead!
    +
    +
    +{-# LINE 1 "src/TensorFlow/Internal/Raw.chs" #-}
    +-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE ForeignFunctionInterface #-}
    +
    +module TensorFlow.Internal.Raw where
    +import qualified Foreign.C.Types as C2HSImp
    +import qualified Foreign.Ptr as C2HSImp
    +import qualified Foreign.Storable as C2HSImp
    +
    +
    +
    +
    +
    +import Foreign
    +import Foreign.C
    +
    +data DataType = TF_FLOAT
    +              | TF_DOUBLE
    +              | TF_INT32
    +              | TF_UINT8
    +              | TF_INT16
    +              | TF_INT8
    +              | TF_STRING
    +              | TF_COMPLEX64
    +              | TF_COMPLEX
    +              | TF_INT64
    +              | TF_BOOL
    +              | TF_QINT8
    +              | TF_QUINT8
    +              | TF_QINT32
    +              | TF_BFLOAT16
    +              | TF_QINT16
    +              | TF_QUINT16
    +              | TF_UINT16
    +              | TF_COMPLEX128
    +              | TF_HALF
    +              | TF_RESOURCE
    +  deriving (Show,Eq)
    +instance Enum DataType where
    +  succ TF_FLOAT = TF_DOUBLE
    +  succ TF_DOUBLE = TF_INT32
    +  succ TF_INT32 = TF_UINT8
    +  succ TF_UINT8 = TF_INT16
    +  succ TF_INT16 = TF_INT8
    +  succ TF_INT8 = TF_STRING
    +  succ TF_STRING = TF_COMPLEX64
    +  succ TF_COMPLEX64 = TF_INT64
    +  succ TF_COMPLEX = TF_INT64
    +  succ TF_INT64 = TF_BOOL
    +  succ TF_BOOL = TF_QINT8
    +  succ TF_QINT8 = TF_QUINT8
    +  succ TF_QUINT8 = TF_QINT32
    +  succ TF_QINT32 = TF_BFLOAT16
    +  succ TF_BFLOAT16 = TF_QINT16
    +  succ TF_QINT16 = TF_QUINT16
    +  succ TF_QUINT16 = TF_UINT16
    +  succ TF_UINT16 = TF_COMPLEX128
    +  succ TF_COMPLEX128 = TF_HALF
    +  succ TF_HALF = TF_RESOURCE
    +  succ TF_RESOURCE = error "DataType.succ: TF_RESOURCE has no successor"
    +
    +  pred TF_DOUBLE = TF_FLOAT
    +  pred TF_INT32 = TF_DOUBLE
    +  pred TF_UINT8 = TF_INT32
    +  pred TF_INT16 = TF_UINT8
    +  pred TF_INT8 = TF_INT16
    +  pred TF_STRING = TF_INT8
    +  pred TF_COMPLEX64 = TF_STRING
    +  pred TF_COMPLEX = TF_STRING
    +  pred TF_INT64 = TF_COMPLEX64
    +  pred TF_BOOL = TF_INT64
    +  pred TF_QINT8 = TF_BOOL
    +  pred TF_QUINT8 = TF_QINT8
    +  pred TF_QINT32 = TF_QUINT8
    +  pred TF_BFLOAT16 = TF_QINT32
    +  pred TF_QINT16 = TF_BFLOAT16
    +  pred TF_QUINT16 = TF_QINT16
    +  pred TF_UINT16 = TF_QUINT16
    +  pred TF_COMPLEX128 = TF_UINT16
    +  pred TF_HALF = TF_COMPLEX128
    +  pred TF_RESOURCE = TF_HALF
    +  pred TF_FLOAT = error "DataType.pred: TF_FLOAT has no predecessor"
    +
    +  enumFromTo from to = go from
    +    where
    +      end = fromEnum to
    +      go v = case compare (fromEnum v) end of
    +                 LT -> v : go (succ v)
    +                 EQ -> [v]
    +                 GT -> []
    +
    +  enumFrom from = enumFromTo from TF_RESOURCE
    +
    +  fromEnum TF_FLOAT = 1
    +  fromEnum TF_DOUBLE = 2
    +  fromEnum TF_INT32 = 3
    +  fromEnum TF_UINT8 = 4
    +  fromEnum TF_INT16 = 5
    +  fromEnum TF_INT8 = 6
    +  fromEnum TF_STRING = 7
    +  fromEnum TF_COMPLEX64 = 8
    +  fromEnum TF_COMPLEX = 8
    +  fromEnum TF_INT64 = 9
    +  fromEnum TF_BOOL = 10
    +  fromEnum TF_QINT8 = 11
    +  fromEnum TF_QUINT8 = 12
    +  fromEnum TF_QINT32 = 13
    +  fromEnum TF_BFLOAT16 = 14
    +  fromEnum TF_QINT16 = 15
    +  fromEnum TF_QUINT16 = 16
    +  fromEnum TF_UINT16 = 17
    +  fromEnum TF_COMPLEX128 = 18
    +  fromEnum TF_HALF = 19
    +  fromEnum TF_RESOURCE = 20
    +
    +  toEnum 1 = TF_FLOAT
    +  toEnum 2 = TF_DOUBLE
    +  toEnum 3 = TF_INT32
    +  toEnum 4 = TF_UINT8
    +  toEnum 5 = TF_INT16
    +  toEnum 6 = TF_INT8
    +  toEnum 7 = TF_STRING
    +  toEnum 8 = TF_COMPLEX64
    +  toEnum 9 = TF_INT64
    +  toEnum 10 = TF_BOOL
    +  toEnum 11 = TF_QINT8
    +  toEnum 12 = TF_QUINT8
    +  toEnum 13 = TF_QINT32
    +  toEnum 14 = TF_BFLOAT16
    +  toEnum 15 = TF_QINT16
    +  toEnum 16 = TF_QUINT16
    +  toEnum 17 = TF_UINT16
    +  toEnum 18 = TF_COMPLEX128
    +  toEnum 19 = TF_HALF
    +  toEnum 20 = TF_RESOURCE
    +  toEnum unmatched = error ("DataType.toEnum: Cannot match " ++ show unmatched)
    +
    +{-# LINE 24 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +data Code = TF_OK
    +          | TF_CANCELLED
    +          | TF_UNKNOWN
    +          | TF_INVALID_ARGUMENT
    +          | TF_DEADLINE_EXCEEDED
    +          | TF_NOT_FOUND
    +          | TF_ALREADY_EXISTS
    +          | TF_PERMISSION_DENIED
    +          | TF_RESOURCE_EXHAUSTED
    +          | TF_FAILED_PRECONDITION
    +          | TF_ABORTED
    +          | TF_OUT_OF_RANGE
    +          | TF_UNIMPLEMENTED
    +          | TF_INTERNAL
    +          | TF_UNAVAILABLE
    +          | TF_DATA_LOSS
    +          | TF_UNAUTHENTICATED
    +  deriving (Show,Eq)
    +instance Enum Code where
    +  succ TF_OK = TF_CANCELLED
    +  succ TF_CANCELLED = TF_UNKNOWN
    +  succ TF_UNKNOWN = TF_INVALID_ARGUMENT
    +  succ TF_INVALID_ARGUMENT = TF_DEADLINE_EXCEEDED
    +  succ TF_DEADLINE_EXCEEDED = TF_NOT_FOUND
    +  succ TF_NOT_FOUND = TF_ALREADY_EXISTS
    +  succ TF_ALREADY_EXISTS = TF_PERMISSION_DENIED
    +  succ TF_PERMISSION_DENIED = TF_RESOURCE_EXHAUSTED
    +  succ TF_RESOURCE_EXHAUSTED = TF_FAILED_PRECONDITION
    +  succ TF_FAILED_PRECONDITION = TF_ABORTED
    +  succ TF_ABORTED = TF_OUT_OF_RANGE
    +  succ TF_OUT_OF_RANGE = TF_UNIMPLEMENTED
    +  succ TF_UNIMPLEMENTED = TF_INTERNAL
    +  succ TF_INTERNAL = TF_UNAVAILABLE
    +  succ TF_UNAVAILABLE = TF_DATA_LOSS
    +  succ TF_DATA_LOSS = TF_UNAUTHENTICATED
    +  succ TF_UNAUTHENTICATED = error "Code.succ: TF_UNAUTHENTICATED has no successor"
    +
    +  pred TF_CANCELLED = TF_OK
    +  pred TF_UNKNOWN = TF_CANCELLED
    +  pred TF_INVALID_ARGUMENT = TF_UNKNOWN
    +  pred TF_DEADLINE_EXCEEDED = TF_INVALID_ARGUMENT
    +  pred TF_NOT_FOUND = TF_DEADLINE_EXCEEDED
    +  pred TF_ALREADY_EXISTS = TF_NOT_FOUND
    +  pred TF_PERMISSION_DENIED = TF_ALREADY_EXISTS
    +  pred TF_RESOURCE_EXHAUSTED = TF_PERMISSION_DENIED
    +  pred TF_FAILED_PRECONDITION = TF_RESOURCE_EXHAUSTED
    +  pred TF_ABORTED = TF_FAILED_PRECONDITION
    +  pred TF_OUT_OF_RANGE = TF_ABORTED
    +  pred TF_UNIMPLEMENTED = TF_OUT_OF_RANGE
    +  pred TF_INTERNAL = TF_UNIMPLEMENTED
    +  pred TF_UNAVAILABLE = TF_INTERNAL
    +  pred TF_DATA_LOSS = TF_UNAVAILABLE
    +  pred TF_UNAUTHENTICATED = TF_DATA_LOSS
    +  pred TF_OK = error "Code.pred: TF_OK has no predecessor"
    +
    +  enumFromTo from to = go from
    +    where
    +      end = fromEnum to
    +      go v = case compare (fromEnum v) end of
    +                 LT -> v : go (succ v)
    +                 EQ -> [v]
    +                 GT -> []
    +
    +  enumFrom from = enumFromTo from TF_UNAUTHENTICATED
    +
    +  fromEnum TF_OK = 0
    +  fromEnum TF_CANCELLED = 1
    +  fromEnum TF_UNKNOWN = 2
    +  fromEnum TF_INVALID_ARGUMENT = 3
    +  fromEnum TF_DEADLINE_EXCEEDED = 4
    +  fromEnum TF_NOT_FOUND = 5
    +  fromEnum TF_ALREADY_EXISTS = 6
    +  fromEnum TF_PERMISSION_DENIED = 7
    +  fromEnum TF_RESOURCE_EXHAUSTED = 8
    +  fromEnum TF_FAILED_PRECONDITION = 9
    +  fromEnum TF_ABORTED = 10
    +  fromEnum TF_OUT_OF_RANGE = 11
    +  fromEnum TF_UNIMPLEMENTED = 12
    +  fromEnum TF_INTERNAL = 13
    +  fromEnum TF_UNAVAILABLE = 14
    +  fromEnum TF_DATA_LOSS = 15
    +  fromEnum TF_UNAUTHENTICATED = 16
    +
    +  toEnum 0 = TF_OK
    +  toEnum 1 = TF_CANCELLED
    +  toEnum 2 = TF_UNKNOWN
    +  toEnum 3 = TF_INVALID_ARGUMENT
    +  toEnum 4 = TF_DEADLINE_EXCEEDED
    +  toEnum 5 = TF_NOT_FOUND
    +  toEnum 6 = TF_ALREADY_EXISTS
    +  toEnum 7 = TF_PERMISSION_DENIED
    +  toEnum 8 = TF_RESOURCE_EXHAUSTED
    +  toEnum 9 = TF_FAILED_PRECONDITION
    +  toEnum 10 = TF_ABORTED
    +  toEnum 11 = TF_OUT_OF_RANGE
    +  toEnum 12 = TF_UNIMPLEMENTED
    +  toEnum 13 = TF_INTERNAL
    +  toEnum 14 = TF_UNAVAILABLE
    +  toEnum 15 = TF_DATA_LOSS
    +  toEnum 16 = TF_UNAUTHENTICATED
    +  toEnum unmatched = error ("Code.toEnum: Cannot match " ++ show unmatched)
    +
    +{-# LINE 25 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +
    +-- Status.
    +newtype Status = Status (C2HSImp.Ptr (Status))
    +{-# LINE 29 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +newStatus :: IO Status
    +newStatus = tFNewStatus
    +{-# LINE 32 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +deleteStatus :: Status -> IO ()
    +deleteStatus = tFDeleteStatus
    +{-# LINE 35 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +setStatus :: Status -> Code -> CString -> IO ()
    +setStatus s c = tFSetStatus s (fromIntegral $ fromEnum c)
    +
    +getCode :: Status -> IO Code
    +getCode s = toEnum . fromIntegral <$> tFGetCode s
    +
    +message :: Status -> IO CString
    +message = tFMessage
    +{-# LINE 44 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +
    +-- Buffer.
    +data Buffer
    +type BufferPtr = C2HSImp.Ptr (Buffer)
    +{-# LINE 49 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +getBufferData :: BufferPtr -> IO (Ptr ())
    +getBufferData = (\ptr -> do {C2HSImp.peekByteOff ptr 0 :: IO (C2HSImp.Ptr ())})
    +{-# LINE 52 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +getBufferLength :: BufferPtr -> IO CULong
+getBufferLength = (\ptr -> do {C2HSImp.peekByteOff ptr 8 :: IO C2HSImp.CULong})
    +{-# LINE 55 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +-- Tensor.
    +newtype Tensor = Tensor (C2HSImp.Ptr (Tensor))
    +{-# LINE 58 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +instance Storable Tensor where
    +    sizeOf (Tensor t) = sizeOf t
    +    alignment (Tensor t) = alignment t
    +    peek p = fmap Tensor (peek (castPtr p))
    +    poke p (Tensor t) = poke (castPtr p) t
    +
    +newTensor :: DataType
    +          -> Ptr CLong    -- dimensions array
    +          -> CInt         -- num dimensions
    +          -> Ptr ()       -- data
    +          -> CULong       -- data len
    +          -> FunPtr (Ptr () -> CULong -> Ptr () -> IO ())  -- deallocator
    +          -> Ptr ()       -- deallocator arg
    +          -> IO Tensor
    +newTensor dt = tFNewTensor (fromIntegral $ fromEnum dt)
    +
    +deleteTensor :: Tensor -> IO ()
    +deleteTensor = tFDeleteTensor
    +{-# LINE 77 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +tensorType :: Tensor -> IO DataType
    +tensorType t = toEnum . fromIntegral <$> tFTensorType t
    +
    +numDims :: Tensor -> IO CInt
    +numDims = tFNumDims
    +{-# LINE 83 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +dim :: Tensor -> CInt -> IO CLong
    +dim = tFDim
    +{-# LINE 86 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +tensorByteSize :: Tensor -> IO CULong
    +tensorByteSize = tFTensorByteSize
    +{-# LINE 89 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +tensorData :: Tensor -> IO (Ptr ())
    +tensorData = tFTensorData
    +{-# LINE 92 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +
    +-- Session Options.
    +newtype SessionOptions = SessionOptions (C2HSImp.Ptr (SessionOptions))
    +{-# LINE 96 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +newSessionOptions :: IO SessionOptions
    +newSessionOptions = tFNewSessionOptions
    +{-# LINE 99 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +setTarget :: SessionOptions -> CString -> IO ()
    +setTarget = tFSetTarget
    +{-# LINE 102 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +setConfig :: SessionOptions -> Ptr () -> CULong -> Status -> IO ()
    +setConfig = tFSetConfig
    +{-# LINE 105 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +deleteSessionOptions :: SessionOptions -> IO ()
    +deleteSessionOptions = tFDeleteSessionOptions
    +{-# LINE 108 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +
    +-- Session.
    +newtype Session = Session (C2HSImp.Ptr (Session))
    +{-# LINE 112 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +newSession :: SessionOptions -> Status -> IO Session
    +newSession = tFNewSession
    +{-# LINE 115 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +closeSession :: Session -> Status -> IO ()
    +closeSession = tFCloseSession
    +{-# LINE 118 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +deleteSession :: Session -> Status -> IO ()
    +deleteSession = tFDeleteSession
    +{-# LINE 121 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +extendGraph :: Session -> Ptr () -> CULong -> Status -> IO ()
    +extendGraph = tFExtendGraph
    +{-# LINE 124 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +run :: Session
    +    -> BufferPtr                          -- RunOptions proto.
    +    -> Ptr CString -> Ptr Tensor -> CInt  -- Input (names, tensors, count).
    +    -> Ptr CString -> Ptr Tensor -> CInt  -- Output (names, tensors, count).
    +    -> Ptr CString -> CInt                -- Target nodes (names, count).
    +    -> BufferPtr                          -- RunMetadata proto.
    +    -> Status
    +    -> IO ()
    +run = tFRun
    +{-# LINE 134 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +-- FFI helpers.
    +type TensorDeallocFn = Ptr () -> CULong -> Ptr () -> IO ()
    +foreign import ccall "wrapper"
    +    wrapTensorDealloc :: TensorDeallocFn -> IO (FunPtr TensorDeallocFn)
    +
    +
    +-- | Get the OpList of all OpDefs defined in this address space.
    +-- Returns a BufferPtr, ownership of which is transferred to the caller
    +-- (and can be freed using deleteBuffer).
    +--
    +-- The data in the buffer will be the serialized OpList proto for ops registered
    +-- in this address space.
    +getAllOpList :: IO BufferPtr
    +getAllOpList = tFGetAllOpList
    +{-# LINE 149 "src/TensorFlow/Internal/Raw.chs" #-}
    +
    +
    +foreign import ccall "&TF_DeleteBuffer"
    +  deleteBuffer :: FunPtr (BufferPtr -> IO ())
    +
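+-- A minimal consumption sketch (not part of the generated bindings): read
+-- the buffer length, then hand ownership to a 'Foreign.ForeignPtr' so the
+-- 'deleteBuffer' finalizer frees it.  @opListLength@ is a hypothetical
+-- helper:
+--
+-- > import Foreign.ForeignPtr (newForeignPtr)
+-- >
+-- > opListLength :: IO Int
+-- > opListLength = do
+-- >     ptr <- getAllOpList
+-- >     len <- getBufferLength ptr
+-- >     _ <- newForeignPtr deleteBuffer ptr
+-- >     return (fromIntegral len)
+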
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewStatus"
    +  tFNewStatus :: (IO (Status))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteStatus"
    +  tFDeleteStatus :: ((Status) -> (IO ()))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_SetStatus"
    +  tFSetStatus :: ((Status) -> (C2HSImp.CInt -> ((C2HSImp.Ptr C2HSImp.CChar) -> (IO ()))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_GetCode"
    +  tFGetCode :: ((Status) -> (IO C2HSImp.CInt))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_Message"
    +  tFMessage :: ((Status) -> (IO (C2HSImp.Ptr C2HSImp.CChar)))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewTensor"
    +  tFNewTensor :: (C2HSImp.CInt -> ((C2HSImp.Ptr C2HSImp.CLong) -> (C2HSImp.CInt -> ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((C2HSImp.FunPtr ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((C2HSImp.Ptr ()) -> (IO ()))))) -> ((C2HSImp.Ptr ()) -> (IO (Tensor)))))))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteTensor"
    +  tFDeleteTensor :: ((Tensor) -> (IO ()))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_TensorType"
    +  tFTensorType :: ((Tensor) -> (IO C2HSImp.CInt))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NumDims"
    +  tFNumDims :: ((Tensor) -> (IO C2HSImp.CInt))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_Dim"
    +  tFDim :: ((Tensor) -> (C2HSImp.CInt -> (IO C2HSImp.CLong)))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_TensorByteSize"
    +  tFTensorByteSize :: ((Tensor) -> (IO C2HSImp.CULong))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_TensorData"
    +  tFTensorData :: ((Tensor) -> (IO (C2HSImp.Ptr ())))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewSessionOptions"
    +  tFNewSessionOptions :: (IO (SessionOptions))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_SetTarget"
    +  tFSetTarget :: ((SessionOptions) -> ((C2HSImp.Ptr C2HSImp.CChar) -> (IO ())))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_SetConfig"
    +  tFSetConfig :: ((SessionOptions) -> ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((Status) -> (IO ())))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteSessionOptions"
    +  tFDeleteSessionOptions :: ((SessionOptions) -> (IO ()))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewSession"
    +  tFNewSession :: ((SessionOptions) -> ((Status) -> (IO (Session))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_CloseSession"
    +  tFCloseSession :: ((Session) -> ((Status) -> (IO ())))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteSession"
    +  tFDeleteSession :: ((Session) -> ((Status) -> (IO ())))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_ExtendGraph"
    +  tFExtendGraph :: ((Session) -> ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((Status) -> (IO ())))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_Run"
    +  tFRun :: ((Session) -> ((BufferPtr) -> ((C2HSImp.Ptr (C2HSImp.Ptr C2HSImp.CChar)) -> ((C2HSImp.Ptr (Tensor)) -> (C2HSImp.CInt -> ((C2HSImp.Ptr (C2HSImp.Ptr C2HSImp.CChar)) -> ((C2HSImp.Ptr (Tensor)) -> (C2HSImp.CInt -> ((C2HSImp.Ptr (C2HSImp.Ptr C2HSImp.CChar)) -> (C2HSImp.CInt -> ((BufferPtr) -> ((Status) -> (IO ())))))))))))))
    +
    +foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_GetAllOpList"
    +  tFGetAllOpList :: (IO (BufferPtr))
    +
+
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-VarInt.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-VarInt.html
new file mode 100644
index 0000000..3f4826d
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-VarInt.html
@@ -0,0 +1,61 @@
+src/TensorFlow/Internal/VarInt.hs
+-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE BangPatterns #-}
    +
    +{-|
    +Module      : TensorFlow.Internal.VarInt
    +Description : Encoders and decoders for varint types.
    +
    +Originally taken from internal proto-lens code.
    +-}
    +module TensorFlow.Internal.VarInt
    +    ( getVarInt
    +    , putVarInt
    +    ) where
    +
    +import Data.Attoparsec.ByteString as Parse
    +import Data.Bits
    +import Data.ByteString.Lazy.Builder as Builder
    +import Data.Monoid ((<>))
    +import Data.Word (Word64)
    +
    +-- | Decode an unsigned varint.
    +getVarInt :: Parser Word64
    +getVarInt = loop 1 0
    +  where
    +    loop !s !n = do
    +        b <- anyWord8
    +        let n' = n + s * fromIntegral (b .&. 127)
    +        if (b .&. 128) == 0
    +            then return n'
    +            else loop (128*s) n'
    +
    +-- | Encode a Word64.
    +putVarInt :: Word64 -> Builder
    +putVarInt n
    +    | n < 128 = Builder.word8 (fromIntegral n)
    +    | otherwise = Builder.word8 (fromIntegral $ n .&. 127 .|. 128)
    +                      <> putVarInt (n `shiftR` 7)
    +
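+-- A round-trip sketch (assuming attoparsec's 'Parse.parseOnly'; the lazy
+-- builder output is made strict before parsing; @prop_roundTrip@ is a
+-- hypothetical test helper):
+--
+-- > import qualified Data.ByteString.Lazy as L
+-- >
+-- > prop_roundTrip :: Word64 -> Bool
+-- > prop_roundTrip n =
+-- >     Parse.parseOnly getVarInt
+-- >         (L.toStrict (Builder.toLazyByteString (putVarInt n)))
+-- >         == Right n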
+
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Nodes.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Nodes.html
new file mode 100644
index 0000000..c5a5d69
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Nodes.html
@@ -0,0 +1,152 @@
+src/TensorFlow/Nodes.hs
+-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE GeneralizedNewtypeDeriving #-}
    +{-# LANGUAGE MultiParamTypeClasses #-}
    +{-# LANGUAGE RankNTypes #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +{-# LANGUAGE TypeFamilies #-}
    +module TensorFlow.Nodes where
    +
    +import Control.Applicative (liftA2, liftA3)
    +import Data.Map.Strict (Map)
    +import Data.Monoid ((<>))
    +import Data.Set (Set)
    +import Data.String (IsString)
    +import Data.Text (Text)
    +import Lens.Family2 ((^.))
    +import qualified Data.Map.Strict as Map
    +import qualified Data.Set as Set
    +import qualified Data.Vector as V
    +
    +import TensorFlow.Build
    +import TensorFlow.Output
    +import TensorFlow.Tensor
    +import TensorFlow.Types
    +import qualified TensorFlow.Internal.FFI as FFI
    +
    +-- | Types that contain ops which can be run.
    +class Nodes t where
    +    getNodes :: t -> Build (Set NodeName)
    +
    +-- | Types that tensor representations (e.g. 'Tensor', 'ControlNode') can be
    +-- fetched into.
    +--
    +-- Includes collections of tensors (e.g. tuples).
    +class Nodes t => Fetchable t a where
    +    getFetch :: t -> Build (Fetch a)
    +
    +-- | Fetch action. Keeps track of what needs to be fetched and how to decode
    +-- the fetched data.
    +data Fetch a = Fetch
    +          { -- | Nodes to fetch
    +            fetches :: Set Text
    +            -- | Function to create an 'a' from the fetched data.
    +          , fetchRestore :: Map Text FFI.TensorData -> a
    +          }
    +
    +instance Functor Fetch where
    +    fmap f (Fetch fetch restore) = Fetch fetch (f . restore)
    +
    +instance Applicative Fetch where
    +    pure x = Fetch Set.empty (const x)
    +    Fetch fetch restore <*> Fetch fetch' restore' =
    +        Fetch (fetch <> fetch') (restore <*> restore')
    +
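+-- For example, two fetches can be combined into one that decodes a pair
+-- (a sketch; @bothFetches@ is a hypothetical helper):
+--
+-- > bothFetches :: Fetch a -> Fetch b -> Fetch (a, b)
+-- > bothFetches = liftA2 (,)
+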
    +nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b
    +nodesUnion = fmap (foldMap id) . sequenceA
    +
    +instance (Nodes t1, Nodes t2) => Nodes (t1, t2) where
    +    getNodes (x, y) = nodesUnion [getNodes x, getNodes y]
    +
    +instance (Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) where
    +    getNodes (x, y, z) = nodesUnion [getNodes x, getNodes y, getNodes z]
    +
    +instance (Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) where
    +    getFetch (x, y) = liftA2 (,) <$> getFetch x <*> getFetch y
    +
    +instance (Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3)
    +         => Fetchable (t1, t2, t3) (a1, a2, a3) where
    +    getFetch (x, y, z) =
    +        liftA3 (,,) <$> getFetch x <*> getFetch y <*> getFetch z
    +
    +instance Nodes t => Nodes [t] where
    +    getNodes = nodesUnion . map getNodes
    +
    +instance Fetchable t a => Fetchable [t] [a] where
+    getFetch ts = sequenceA <$> mapM getFetch ts
    +
    +instance Nodes ControlNode where
    +    getNodes (ControlNode o) = Set.singleton <$> getOrAddOp o
    +
    +-- We use the constraint @(a ~ ())@ to help with type inference.  For example,
    +-- if @t :: ControlNode@, then this constraint ensures that @run t :: Session
    +-- ()@.  If we used @instance Fetchable ControlNode ()@ instead, then that
    +-- expression would be ambiguous without explicitly specifying the return type.
    +instance a ~ () => Fetchable ControlNode a where
    +    getFetch _ = return $ pure ()
    +
    +instance Nodes (Tensor v a) where
    +    getNodes t = Set.singleton <$> getOrAddOp (t ^. tensorOutput . outputOp)
    +
    +fetchTensorList :: TensorType a => Tensor v a -> Build (Fetch (Shape, [a]))
    +fetchTensorList t = fmap (fmap V.toList) <$> fetchTensorVector t
    +
    +fetchTensorVector :: forall a v . TensorType a
    +                  => Tensor v a -> Build (Fetch (Shape, V.Vector a))
    +fetchTensorVector (Tensor _ o) = do
    +    outputName <- renderOutput o
    +    return $ Fetch (Set.singleton outputName) $ \tensors ->
    +        let tensorData = tensors Map.! outputName
    +            shape = Shape $ FFI.tensorDataDimensions tensorData
    +            vec = decodeTensorData $ TensorData tensorData
    +
    +            expectedType = tensorType (undefined :: a)
    +            actualType = FFI.tensorDataType tensorData
    +            badTypeError = error $ "Bad tensor type: expected "
    +                                   ++ show expectedType
    +                                   ++ ", got "
    +                                   ++ show actualType
    +        in if expectedType /= actualType
    +               then badTypeError
    +               else (shape, vec)
    +
    +-- The constraint "a ~ a'" means that the input/output of fetch can constrain
    +-- the TensorType of each other.
    +instance (TensorType a, a ~ a') => Fetchable (Tensor v a) (V.Vector a') where
    +    getFetch t = fmap snd <$> fetchTensorVector t
    +
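+-- For example, if @t :: Tensor v Float@, this instance lets
+-- @run t :: Session (V.Vector Float)@ type-check without annotating the
+-- element type on both sides.
+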
    +newtype Scalar a = Scalar {unScalar :: a}
    +    deriving (Show, Eq, Ord, Num, Fractional, Floating, Real, RealFloat,
    +              RealFrac, IsString)
    +
    +instance (TensorType a, a ~ a') => Fetchable (Tensor v a) (Scalar a') where
    +    getFetch t = fmap (Scalar . headFromSingleton . snd) <$> fetchTensorList t
    +      where
    +        headFromSingleton [x] = x
    +        headFromSingleton xs
    +            = error $ "Unable to extract singleton from tensor of length "
    +                          ++ show (length xs)
    +
+
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Orphans.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Orphans.html
new file mode 100644
index 0000000..a0f10f0
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Orphans.html
@@ -0,0 +1,57 @@
+src/TensorFlow/Orphans.hs
+-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +
    +{-# LANGUAGE StandaloneDeriving #-}
    +{-# OPTIONS_GHC -fno-warn-orphans #-}
    +-- Orphan instances for certain proto messages/enums, used internally.
    +-- TODO(judahjacobson): consider making proto-lens generate some or all of
    +-- these automatically; or, alternately, make new Haskell datatypes.
    +module TensorFlow.Orphans() where
    +
    +import Proto.Tensorflow.Core.Framework.AttrValue
    +    ( AttrValue(..)
    +    , AttrValue'ListValue(..)
    +    , NameAttrList(..)
    +    )
    +import Proto.Tensorflow.Core.Framework.NodeDef
    +    ( NodeDef(..))
    +import Proto.Tensorflow.Core.Framework.ResourceHandle
    +    ( ResourceHandle(..))
    +import Proto.Tensorflow.Core.Framework.Tensor
    +    (TensorProto(..))
    +import Proto.Tensorflow.Core.Framework.TensorShape
    +    (TensorShapeProto(..), TensorShapeProto'Dim(..))
    +import Proto.Tensorflow.Core.Framework.Types (DataType(..))
    +
    +deriving instance Ord AttrValue
    +deriving instance Ord AttrValue'ListValue
    +deriving instance Ord DataType
    +deriving instance Ord NameAttrList
    +deriving instance Ord NodeDef
    +deriving instance Ord ResourceHandle
    +deriving instance Ord TensorProto
    +deriving instance Ord TensorShapeProto
    +deriving instance Ord TensorShapeProto'Dim
    +
+
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Output.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Output.html
new file mode 100644
index 0000000..6a8759a
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Output.html
@@ -0,0 +1,167 @@
+src/TensorFlow/Output.hs
+-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE GeneralizedNewtypeDeriving #-}
    +{-# LANGUAGE Rank2Types #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +
    +module TensorFlow.Output
    +    ( ControlNode(..)
    +    , Device(..)
    +    -- * Ops
    +    , NodeName(..)
    +    , Op(..)
    +    , opUnrendered
    +    , OpDef(..)
    +    , opName
    +    , opType
    +    , opAttr
    +    , opInputs
    +    , opControlInputs
    +    , OpType(..)
    +    , OutputIx(..)
    +    , Output(..)
    +    , output
    +    , outputIndex
    +    , outputOp
    +    , PendingNodeName(..)
    +    )  where
    +
    +import qualified Data.Map.Strict as Map
    +import Data.ProtoLens.TextFormat (showMessage)
    +import Data.String (IsString(..))
    +import Data.Text (Text)
    +import qualified Data.Text as Text
    +import Lens.Family2 (Lens', Traversal', (.~), (&), (^.))
    +import Lens.Family2.Unchecked (lens)
    +import Proto.Tensorflow.Core.Framework.AttrValue (AttrValue(..))
    +import Proto.Tensorflow.Core.Framework.NodeDef (NodeDef(..), name)
    +import Data.Default (def)
    +import TensorFlow.Types (Attribute, attrLens)
    +import TensorFlow.Orphans ()
    +
    +-- | A type of graph node which has no outputs. These nodes are
    +-- valuable for causing side effects when they are run.
    +newtype ControlNode = ControlNode { unControlNode :: Op }
    +
    +-- | The type of op of a node in the graph.  This corresponds to the proto field
    +-- NodeDef.op.
    +newtype OpType = OpType { unOpType :: Text }
    +    deriving (Eq, Ord, Show)
    +
    +instance IsString OpType where
    +    fromString = OpType . Text.pack
    +
    +-- | An output of a TensorFlow node.
    +data Output = Output !OutputIx !Op
    +    deriving (Eq, Ord, Show)
    +
    +output :: OutputIx -> Op -> Output
    +output = Output
    +
    +outputOp :: Lens' Output Op
    +outputOp = lens (\(Output _ o) -> o) (\(Output i _) o -> Output i o)
    +
    +outputIndex :: Lens' Output OutputIx
    +outputIndex = lens (\(Output i _) -> i) (\(Output _ o) i -> Output i o)
    +
    +newtype OutputIx = OutputIx { unOutputIx :: Int }
    +    deriving (Eq, Ord, Num, Enum, Show)
    +
    +-- | A device that a node can be assigned to.
    +-- There's a naming convention where the device names
    +-- are constructed from job and replica names.
    +newtype Device = Device {deviceName :: Text}
    +    deriving (Eq, Ord, IsString)
    +
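+-- For example, a typical fully qualified device name (a sketch):
+--
+-- > Device "/job:worker/replica:0/task:0/gpu:0"
+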
    +instance Show Device where
    +    show (Device d) = show d
    +
    +-- | The representation of a node in a TensorFlow graph.
    +data Op
    +    = Rendered !NodeDef  -- ^ Properties are fixed, including the
    +                         -- device, name, and scope.
    +    | Unrendered !OpDef  -- ^ Properties are not fixed, and may change depending
    +                         -- on which context this op is rendered in.
    +    deriving (Eq, Ord)
    +
    +instance Show Op where
    +    show (Rendered n) = "Rendered " ++ showMessage n
    +    show (Unrendered o) = "Unrendered " ++ show (o ^. opName)
    +
    +-- | Traverse on the 'Unrendered' of an 'Op'.
    +--
    +-- Same implementation as _Left.
    +opUnrendered :: Traversal' Op OpDef
    +opUnrendered f (Unrendered a) = Unrendered <$> f a
    +opUnrendered _ (Rendered b) = pure (Rendered b)
    +
    +-- | Op definition. This corresponds somewhat to the 'NodeDef' proto.
    +data OpDef = OpDef
    +    { _opName :: !PendingNodeName
    +    , _opType :: !OpType
    +    , _opAttrs :: !(Map.Map Text AttrValue)
    +    , _opInputs :: [Output]
    +    , _opControlInputs :: [NodeName]
    +    }  deriving (Eq, Ord)
    +
    +-- | The name specified for an unrendered Op.  If an Op has an
    +-- ImplicitName, it will be assigned based on the opType plus a
    +-- unique identifier.  Does not contain the "scope" prefix.
    +data PendingNodeName = ExplicitName !Text | ImplicitName
    +    deriving (Eq, Ord, Show)
    +
    +-- | The name of a node in the graph.  This corresponds to the proto field
    +-- NodeDef.name.  Includes the scope prefix (if any) and a unique identifier
    +-- (if the node was implicitly named).
    +newtype NodeName = NodeName { unNodeName :: Text }
    +    deriving (Eq, Ord, Show)
    +
    +opName :: Lens' OpDef PendingNodeName
    +opName = lens _opName (\o x -> o {_opName = x})
    +
    +opType :: Lens' OpDef OpType
    +opType = lens _opType (\o x -> o { _opType = x})
    +
    +opAttr :: Attribute a => Text -> Lens' OpDef a
    +opAttr n = lens _opAttrs (\o x -> o {_opAttrs = x})
    +              . lens (Map.findWithDefault def n) (flip (Map.insert n))
    +              . attrLens
    +
    +opInputs :: Lens' OpDef [Output]
    +opInputs = lens _opInputs (\o x -> o {_opInputs = x})
    +
    +opControlInputs :: Lens' OpDef [NodeName]
    +opControlInputs = lens _opControlInputs (\o x -> o {_opControlInputs = x})
    +
    +-- TODO(gnezdo): IsString instance is weird and we should move that
    +-- code into a Build function
    +instance IsString Output where
    +    fromString s = case break (==':') s of
    +        (n, ':':ixStr)
    +            | [(ix, "")] <- read ixStr -> Output (fromInteger ix) $ assigned n
    +        _ -> Output 0 $ assigned s
    +        where assigned n = Rendered $ def & name .~ Text.pack n
    +
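+-- For example (a sketch): @"foo:1" :: Output@ refers to output index 1 of
+-- a rendered node named @foo@, while @"foo"@ defaults to output index 0.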
    +
+
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Session.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Session.html
new file mode 100644
index 0000000..be4612a
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Session.html
@@ -0,0 +1,213 @@
+src/TensorFlow/Session.hs
+-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE GeneralizedNewtypeDeriving #-}
    +{-# LANGUAGE Rank2Types #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +{-# LANGUAGE TupleSections #-}
    +
    +module TensorFlow.Session (
    +    Session,
    +    -- * Opaque value created via 'sessionConfig' and 'sessionTarget'.
    +    SessionOption,
    +    sessionConfig,
    +    sessionTarget,
    +    runSession,
    +    runSessionWithOptions,
    +    build,
    +    buildAnd,
    +    buildWithSummary,
    +    extend,
    +    addGraphDef,
    +    run,
    +    runWithFeeds,
    +    run_,
    +    runWithFeeds_,
    +    asyncProdNodes,
    +    ) where
    +
    +import Control.Monad (forever, unless, void)
    +import Control.Monad.IO.Class (MonadIO, liftIO)
    +import Control.Monad.Trans.Class (lift)
    +import Control.Monad.Trans.Reader (ReaderT(..), ask, asks)
    +import Data.ByteString (ByteString)
    +import Data.Functor.Identity (runIdentity)
    +import qualified Data.Map.Strict as Map
    +import qualified Data.Set as Set
    +import Data.Set (Set)
    +import Data.Text.Encoding (encodeUtf8)
    +import Data.ProtoLens (def)
    +import Lens.Family2 ((&), (.~))
    +import Proto.Tensorflow.Core.Framework.Graph (node)
    +import Proto.Tensorflow.Core.Protobuf.Config (ConfigProto)
    +
    +import TensorFlow.Build
    +import qualified TensorFlow.Internal.FFI as FFI
    +import qualified TensorFlow.Internal.Raw as Raw
    +import TensorFlow.Nodes
    +import TensorFlow.Output (NodeName, unNodeName)
    +import TensorFlow.Tensor
    +
    +-- Common state threaded through the session.
    +data SessionState
    +    = SessionState {
    +          rawSession :: FFI.Session
    +        , asyncCollector :: IO () -> IO ()
    +          -- ^ Starts the given action concurrently.
    +        }
    +
    +newtype Session a
    +    = Session (ReaderT SessionState (BuildT IO) a)
    +    deriving (Functor, Applicative, Monad, MonadIO)
    +
    +-- | Run 'Session' actions in a new TensorFlow session.
    +runSession :: Session a -> IO a
    +runSession = runSessionWithOptions []
    +
    +-- | Setting of an option for the session (see 'runSessionWithOptions').
+newtype SessionOption =
+    SessionOption { unSessionOption :: Raw.SessionOptions -> IO () }
    +
    +-- | Target can be: "local", ip:port, host:port.
    +-- The set of supported factories depends on the linked in libraries.
    +-- REQUIRES "//learning/brain/public:tensorflow_remote" dependency for the binary.
    +sessionTarget :: ByteString -> SessionOption
    +sessionTarget = SessionOption . FFI.setSessionTarget
    +
    +-- | Uses the specified config for the created session.
    +sessionConfig :: ConfigProto -> SessionOption
    +sessionConfig = SessionOption . FFI.setSessionConfig
    +
    +-- | Run 'Session' actions in a new TensorFlow session created with
    +-- the given option setter actions ('sessionTarget', 'sessionConfig').
    +runSessionWithOptions :: [SessionOption] -> Session a -> IO a
    +runSessionWithOptions options (Session m) =
    +    FFI.withSession applyOptions $
    +        \as rs -> evalBuildT (runReaderT m (SessionState rs as))
+  where applyOptions opt = mapM_ (`unSessionOption` opt) options
    +
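+-- A usage sketch (assuming @OverloadedStrings@ for the target literal):
+--
+-- > main :: IO ()
+-- > main = runSessionWithOptions [sessionTarget "local"] (return ())
+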
    +-- | Lift a 'Build' action into a 'Session', including any explicit op
    +-- renderings.
    +build :: Build a -> Session a
    +build = Session . lift . hoistBuildT (return . runIdentity)
    +
    +-- | Lift a 'Build' action into a 'Session', including any explicit op
    +-- renderings. Returns the merged summary ops which can be used for
    +-- logging, see 'TensorFlow.Logging.build' for a convenient wrapper.
    +buildWithSummary :: forall a . Build a -> Session (a, [SummaryTensor])
    +buildWithSummary b = Session $ lift $ (,) <$> v <*> collectAllSummaries
    +  where v :: BuildT IO a
    +        v = hoistBuildT (return . runIdentity) b
    +
+-- | Adds all pending rendered nodes to the TensorFlow graph and runs
+-- any pending initializers.
    +--
    +-- Note that run, runWithFeeds, etc. will all call this function implicitly.
    +extend :: Session ()
    +extend = do
    +    let withSessionWhen vs action =
    +            unless (null vs) $ Session (asks rawSession) >>= action
    +    nodesToExtend <- build flushNodeBuffer
    +    withSessionWhen nodesToExtend $ \session ->
    +        liftIO $ FFI.extendGraph session
    +               $ def & node .~ nodesToExtend
    +    -- Now that all the nodes are created, run the initializers.
    +    initializers <- build flushInitializers
    +    withSessionWhen initializers $ \session ->
    +        void $ liftIO $ FFI.run session [] [] (toNodeNames initializers)
    +
    +-- | Helper combinator for doing something with the result of a 'Build' action.
    +-- Example usage:
    +--
    +-- > buildAnd run :: Fetchable t a => Build t -> Session a
    +buildAnd :: (a -> Session b) -> Build a -> Session b
    +buildAnd f m = build m >>= f
    +
    +-- | Run a subgraph 't', rendering any dependent nodes that aren't already
    +-- rendered, and fetch the corresponding values for 'a'.
    +run :: Fetchable t a => t -> Session a
    +run = runWithFeeds []
    +
    +-- | Run a subgraph 't', rendering any dependent nodes that aren't already
    +-- rendered, feed the given input values, and fetch the corresponding result
    +-- values for 'a'.
    +runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a
    +runWithFeeds feeds t = do
    +    ns <- build $ getNodes t
    +    -- Note that this call to "fetch" shouldn't affect the following "extend"
    +    -- call, since all nodes in t and its inputs/deps will be rendered by the
    +    -- above call to getNodes.
    +    fetch <- build $ getFetch t
    +    runFetchWithFeeds feeds ns fetch
    +
    +runFetchWithFeeds :: [Feed] -> Set NodeName -> Fetch a -> Session a
    +runFetchWithFeeds feeds target (Fetch fetch restore) = do
    +    extend
    +    feeds' <- build $ fixFeeds feeds
    +    let fetchNames = encodeUtf8 <$> Set.toList fetch
    +        targetNames = toNodeNames $ Set.toList target
    +    session <- Session (asks rawSession)
    +    runResult <- liftIO $ FFI.run session
    +                                  feeds'
    +                                  fetchNames
    +                                  targetNames
    +    let resultTensorsMap = Map.fromList $ zip (Set.toList fetch) runResult
    +    return $ restore resultTensorsMap
    +
    +toNodeNames :: [NodeName] -> [ByteString]
    +toNodeNames = map (encodeUtf8 . unNodeName)
    +
    +-- | Run a subgraph 't', rendering and extending any dependent nodes that aren't
    +-- already rendered.  This behaves like 'run' except that it doesn't do any
    +-- fetches.
    +run_ :: Nodes t => t -> Session ()
    +run_ = runWithFeeds_ []
    +
    +-- | Run a subgraph 't', rendering any dependent nodes that aren't already
    +-- rendered, feed the given input values, and fetch the corresponding result
    +-- values for 'a'.  This behaves like 'runWithFeeds' except that it doesn't do
    +-- any fetches.
    +runWithFeeds_ :: Nodes t => [Feed] -> t -> Session ()
    +runWithFeeds_ feeds t = do
    +    ns <- build $ getNodes t
    +    runFetchWithFeeds feeds ns (pure ())
    +
    +fixFeeds :: [Feed] -> Build [(ByteString, FFI.TensorData)]
    +fixFeeds = mapM $ \(Feed o d) -> (,d) . encodeUtf8 <$> renderOutput o
    +
    +-- | Starts a concurrent thread which evaluates the given Nodes
    +-- forever until runSession exits or an exception occurs. Graph
    +-- extension happens synchronously, but the resultant run proceeds as
    +-- a separate thread.
    +asyncProdNodes :: Nodes t
    +                  => t  -- ^ Node to evaluate concurrently.
    +                  -> Session ()
    +asyncProdNodes nodes = do
    +    target <- build (getNodes nodes)
    +    extend
    +    let targetNames = toNodeNames $ Set.toList target
    +    state <- Session ask
    +    let loop = forever (void (FFI.run (rawSession state) [] [] targetNames))
    +    liftIO (asyncCollector state loop)
    +
+
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Tensor.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Tensor.html
new file mode 100644
index 0000000..282d865
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Tensor.html
@@ -0,0 +1,96 @@
+src/TensorFlow/Tensor.hs
+-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE GADTs #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE Rank2Types #-}
    +
    +module TensorFlow.Tensor where
    +
    +import Data.String (IsString(..))
    +import qualified Data.Text as Text
    +import Lens.Family2 (Lens', Traversal')
    +import Lens.Family2.Unchecked (lens)
    +
    +import TensorFlow.Output (Output, outputOp, opUnrendered, opAttr)
    +import TensorFlow.Types (TensorData(..), Attribute)
    +import qualified TensorFlow.Internal.FFI as FFI
    +
    +-- | A named output of a TensorFlow operation.
    +--
    +-- The type parameter @a@ is the type of the elements in the 'Tensor'.  The
    +-- parameter @v@ is either 'Value' or 'Ref', depending on whether the graph is
    +-- treating this op output as an immutable 'Value' or a stateful 'Ref' (e.g., a
+-- variable).  Note that a @Tensor Ref@ can be cast into a @Tensor Value@ via
    +-- 'value'.
    +data Tensor v a = Tensor (TensorKind v) Output
    +
    +data Value
    +data Ref
    +
    +-- | This class provides a runtime switch on whether a 'Tensor' should be
    +-- treated as a 'Value' or as a 'Ref'.
    +data TensorKind v where
    +  ValueKind :: TensorKind Value
    +  RefKind :: TensorKind Ref
    +
    +tensorKind :: Lens' (Tensor v a) (TensorKind v)
    +tensorKind = lens (\(Tensor v _) -> v) (\(Tensor _ o) v -> Tensor v o)
    +
    +tensorOutput :: Lens' (Tensor v a) Output
    +tensorOutput = lens (\(Tensor _ o) -> o) (\(Tensor v _) o -> Tensor v o)
    +
    +-- TODO: Come up with a better API for handling attributes.
    +-- | Lens for the attributes of a tensor.
    +--
    +-- Only valid if the tensor has not yet been rendered. If the tensor has been
    +-- rendered, the traversal will be over nothing (nothing can be read or
    +-- written).
    +tensorAttr :: Attribute attr => Text.Text -> Traversal' (Tensor v a) attr
    +tensorAttr x = tensorOutput . outputOp . opUnrendered . opAttr x
    +
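+-- A sketch of setting a hypothetical @"dtype"@ attribute on an unrendered
+-- tensor (assumes the @(&)@ and @(.~)@ operators from "Lens.Family2"):
+--
+-- > t & tensorAttr "dtype" .~ DT_FLOAT
+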
    +-- | Cast a 'Tensor *' into a 'Tensor Value'. Common usage is to cast a
    +-- Ref into Value. This behaves like a no-op.
    +value :: Tensor v a -> Tensor Value a
    +value (Tensor _ o) = Tensor ValueKind o
    +
    +-- | A pair of a 'Tensor' and some data that should be fed into that 'Tensor'
    +-- when running the graph.
    +data Feed = Feed Output FFI.TensorData
    +
    +-- | Create a 'Feed' for feeding the given data into a 'Tensor' when running
    +-- the graph.
    +--
    +-- Note that if a 'Tensor' is rendered, its identity may change; so feeding the
    +-- rendered 'Tensor' may be different than feeding the original 'Tensor'.
    +feed :: Tensor v a -> TensorData a -> Feed
    +feed (Tensor _ o) (TensorData td) = Feed o td
    +
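+-- For example (a sketch, where @p@ is a placeholder tensor and the data is
+-- built with 'TensorFlow.Types.encodeTensorData'):
+--
+-- > feed p (encodeTensorData (Shape [2]) (V.fromList [1, 2 :: Float]))
+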
    +-- | Create a 'Tensor' for a given name.  This can be used to reference nodes
    +-- in a 'GraphDef' that was loaded via 'addGraphDef'.
    +-- TODO(judahjacobson): add more safety checks here.
    +tensorFromName :: TensorKind v -> Text.Text -> Tensor v a
    +tensorFromName v = Tensor v . fromString . Text.unpack
    +
+
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Types.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Types.html
new file mode 100644
index 0000000..0f3bf98
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Types.html
@@ -0,0 +1,393 @@
+src/TensorFlow/Types.hs
+-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE ConstraintKinds #-}
    +{-# LANGUAGE DataKinds #-}
    +{-# LANGUAGE FlexibleContexts #-}
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE RankNTypes #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +{-# LANGUAGE TypeFamilies #-}
    +{-# LANGUAGE TypeOperators #-}
    +-- We use UndecidableInstances for type families with recursive definitions
    +-- like "\\".  Those instances will terminate since each equation unwraps one
    +-- cons cell of a type-level list.
    +{-# LANGUAGE UndecidableInstances #-}
    +
    +module TensorFlow.Types
    +    ( TensorType(..)
    +    , TensorData(..)
    +    , Shape(..)
    +    , protoShape
    +    , Attribute(..)
    +    -- * Type constraints
    +    , OneOf
    +    , type (/=)
    +    -- ** Implementation of constraints
    +    , TypeError
    +    , ExcludedCase
    +    , TensorTypes
    +    , NoneOf
    +    , type (\\)
    +    , Delete
    +    , AllTensorTypes
    +    ) where
    +
    +import Data.Complex (Complex)
    +import Data.Default (def)
    +import Data.Int (Int8, Int16, Int32, Int64)
    +import Data.Monoid ((<>))
    +import Data.Word (Word8, Word16, Word64)
    +import Foreign.Storable (Storable)
    +import GHC.Exts (Constraint, IsList(..))
    +import Lens.Family2 (Lens', view, (&), (.~))
    +import Lens.Family2.Unchecked (iso)
    +import qualified Data.Attoparsec.ByteString as Atto
    +import Data.ByteString (ByteString)
    +import qualified Data.ByteString as B
    +import Data.ByteString.Builder (Builder)
    +import qualified Data.ByteString.Builder as Builder
    +import qualified Data.ByteString.Lazy as L
    +import qualified Data.Vector as V
    +import qualified Data.Vector.Storable as S
    +import Proto.Tensorflow.Core.Framework.AttrValue
    +    ( AttrValue(..)
    +    , AttrValue'ListValue(..)
    +    , b
    +    , f
    +    , i
    +    , s
    +    , list
    +    , type'
    +    , shape
    +    , tensor
    +    )
    +import Proto.Tensorflow.Core.Framework.Tensor as Tensor
    +    ( TensorProto(..)
    +    , floatVal
    +    , doubleVal
    +    , intVal
    +    , stringVal
    +    , int64Val
    +    , stringVal
    +    , boolVal
    +    )
    +import Proto.Tensorflow.Core.Framework.TensorShape
    +    ( TensorShapeProto(..)
    +    , dim
    +    , size
    +    )
    +import Proto.Tensorflow.Core.Framework.Types (DataType(..))
    +
    +import TensorFlow.Internal.VarInt (getVarInt, putVarInt)
    +import qualified TensorFlow.Internal.FFI as FFI
    +
    +-- | Data about a tensor that is encoded for the TensorFlow APIs.
    +newtype TensorData a = TensorData { unTensorData :: FFI.TensorData }
    +
    +-- | The class of scalar types supported by tensorflow.
    +class TensorType a where
    +    tensorType :: a -> DataType
    +    tensorRefType :: a -> DataType
    +    tensorVal :: Lens' TensorProto [a]
    +    -- | Decode the bytes of a TensorData into a Vector.
    +    decodeTensorData :: TensorData a -> V.Vector a
    +    -- | Encode a Vector into a TensorData.
    +    --
    +    -- The values should be in row major order, e.g.,
    +    --
    +    --   element 0:   index (0, ..., 0)
    +    --   element 1:   index (0, ..., 1)
    +    --   ...
    +    encodeTensorData :: Shape -> V.Vector a -> TensorData a
    +
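+-- For example, a 2x2 matrix with rows @[1, 2]@ and @[3, 4]@ is encoded in
+-- row major order (a sketch):
+--
+-- > encodeTensorData (Shape [2, 2]) (V.fromList [1, 2, 3, 4 :: Float])
+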
    +-- All types, besides ByteString, are encoded as simple arrays and we can use
    +-- Vector.Storable to encode/decode by type casting pointers.
    +
    +-- TODO(fmayle): Assert that the data type matches the return type.
    +simpleDecode :: Storable a => TensorData a -> V.Vector a
    +simpleDecode = S.convert . S.unsafeCast . FFI.tensorDataBytes . unTensorData
    +
    +simpleEncode :: forall a . (TensorType a, Storable a)
    +             => Shape -> V.Vector a -> TensorData a
    +simpleEncode (Shape xs)
    +    = TensorData . FFI.TensorData xs dt . S.unsafeCast . S.convert
    +  where
    +    dt = tensorType (undefined :: a)
    +
    +instance TensorType Float where
    +    tensorType _ = DT_FLOAT
    +    tensorRefType _ = DT_FLOAT_REF
    +    tensorVal = floatVal
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorType Double where
    +    tensorType _ = DT_DOUBLE
    +    tensorRefType _ = DT_DOUBLE_REF
    +    tensorVal = doubleVal
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorType Int32 where
    +    tensorType _ = DT_INT32
    +    tensorRefType _ = DT_INT32_REF
    +    tensorVal = intVal
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorType Int64 where
    +    tensorType _ = DT_INT64
    +    tensorRefType _ = DT_INT64_REF
    +    tensorVal = int64Val
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +integral :: Integral a => Lens' [Int32] [a]
    +integral = iso (fmap fromIntegral) (fmap fromIntegral)
    +
    +instance TensorType Word8 where
    +    tensorType _ = DT_UINT8
    +    tensorRefType _ = DT_UINT8_REF
    +    tensorVal = intVal . integral
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorType Word16 where
    +    tensorType _ = DT_UINT16
    +    tensorRefType _ = DT_UINT16_REF
    +    tensorVal = intVal . integral
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorType Int16 where
    +    tensorType _ = DT_INT16
    +    tensorRefType _ = DT_INT16_REF
    +    tensorVal = intVal . integral
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorType Int8 where
    +    tensorType _ = DT_INT8
    +    tensorRefType _ = DT_INT8_REF
    +    tensorVal = intVal . integral
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorType ByteString where
    +    tensorType _ = DT_STRING
    +    tensorRefType _ = DT_STRING_REF
    +    tensorVal = stringVal
    +    -- Encoded data layout (described in third_party/tensorflow/c/c_api.h):
    +    --   table offsets for each element :: [Word64]
    +    --   at each element offset:
    +    --     string length :: VarInt64
    +    --     string data   :: [Word8]
    +    -- TODO(fmayle): Benchmark these functions.
    +    decodeTensorData tensorData =
    +        either (\err -> error $ "Malformed TF_STRING tensor; " ++ err) id $
    +            if expected /= count
    +                then Left $ "decodeTensorData for ByteString count mismatch " ++
    +                            show (expected, count)
    +                else V.mapM decodeString (S.convert offsets)
    +      where
    +        expected = S.length offsets
    +        count = fromIntegral $ product $ FFI.tensorDataDimensions
    +                    $ unTensorData tensorData
    +        bytes = FFI.tensorDataBytes $ unTensorData tensorData
    +        offsets = S.take count $ S.unsafeCast bytes :: S.Vector Word64
    +        dataBytes = B.pack $ S.toList $ S.drop (count * 8) bytes
    +        decodeString :: Word64 -> Either String ByteString
    +        decodeString offset =
    +            let stringDataStart = B.drop (fromIntegral offset) dataBytes
    +            in Atto.eitherResult $ Atto.parse stringParser stringDataStart
    +        stringParser :: Atto.Parser ByteString
    +        stringParser = getVarInt >>= Atto.take . fromIntegral
    +    encodeTensorData (Shape xs) vec =
    +        TensorData $ FFI.TensorData xs dt byteVector
    +      where
    +        dt = tensorType (undefined :: ByteString)
    +        -- Add a string to an offset table and data blob.
    +        addString :: (Builder, Builder, Word64)
    +                  -> ByteString
    +                  -> (Builder, Builder, Word64)
    +        addString (table, strings, offset) str =
    +            ( table <> Builder.word64LE offset
    +            , strings <> lengthBytes <> Builder.byteString str
    +            , offset + lengthBytesLen + strLen
    +            )
    +          where
    +            strLen = fromIntegral $ B.length str
    +            lengthBytes = putVarInt $ fromIntegral $ B.length str
    +            lengthBytesLen =
    +                fromIntegral $ L.length $ Builder.toLazyByteString lengthBytes
    +        -- Encode all strings.
    +        (table', strings', _) = V.foldl' addString (mempty, mempty, 0) vec
    +        -- Concat offset table with data.
    +        bytes = table' <> strings'
    +        -- Convert to Vector Word8.
    +        byteVector = S.fromList $ L.unpack $ Builder.toLazyByteString bytes
    +
    +
    +instance TensorType Bool where
    +    tensorType _ = DT_BOOL
    +    tensorRefType _ = DT_BOOL_REF
    +    tensorVal = boolVal
    +    decodeTensorData = simpleDecode
    +    encodeTensorData = simpleEncode
    +
    +instance TensorType (Complex Float) where
    +    tensorType _ = DT_COMPLEX64
+    tensorRefType _ = DT_COMPLEX64_REF
    +    tensorVal = error "TODO (Complex Float)"
    +    decodeTensorData = error "TODO (Complex Float)"
    +    encodeTensorData = error "TODO (Complex Float)"
    +
    +instance TensorType (Complex Double) where
    +    tensorType _ = DT_COMPLEX128
+    tensorRefType _ = DT_COMPLEX128_REF
    +    tensorVal = error "TODO (Complex Double)"
    +    decodeTensorData = error "TODO (Complex Double)"
    +    encodeTensorData = error "TODO (Complex Double)"
    +
    +-- | Shape (dimensions) of a tensor.
    +newtype Shape = Shape [Int64] deriving Show
    +
    +instance IsList Shape where
    +    type Item Shape = Int64
    +    fromList = Shape . fromList
    +    toList (Shape ss) = toList ss
    +
    +protoShape :: Lens' TensorShapeProto Shape
    +protoShape = iso protoToShape shapeToProto
    +  where
    +    protoToShape = Shape . fmap (view size) . view dim
    +    shapeToProto (Shape ds) = def & dim .~ fmap (\d -> def & size .~ d) ds
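+-- A small usage sketch (illustrative; @def@ comes from data-default, as
+-- used above):
+--
+-- > (def & protoShape .~ Shape [2, 3]) :: TensorShapeProto
+--
+-- builds a proto with two dims of sizes 2 and 3, and @view protoShape@
+-- recovers the @Shape@.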
    +
    +
    +class Attribute a where
    +    attrLens :: Lens' AttrValue a
    +
    +instance Attribute Float where
    +    attrLens = f
    +
    +instance Attribute ByteString where
    +    attrLens = s
    +
    +instance Attribute Int64 where
    +    attrLens = i
    +
    +instance Attribute DataType where
    +    attrLens = type'
    +
    +instance Attribute TensorProto where
    +    attrLens = tensor
    +
    +instance Attribute Bool where
    +    attrLens = b
    +
    +instance Attribute Shape where
    +    attrLens = shape . protoShape
    +
    +-- TODO(gnezdo): support generating list(Foo) from [Foo].
    +instance Attribute AttrValue'ListValue where
    +    attrLens = list
    +
    +instance Attribute [DataType] where
    +    attrLens = list . type'
    +
    +instance Attribute [Int64] where
    +    attrLens = list . i
    +
    +-- | A 'Constraint' specifying the possible choices of a 'TensorType'.
    +--
    +-- We implement a 'Constraint' like @OneOf '[Double, Float] a@ by turning the
    +-- natural representation as a conjunction, i.e.,
    +--
    +-- @
    +--    a == Double || a == Float
    +-- @
    +--
    +-- into a disjunction like
    +--
    +-- @
    +--     a \/= Int32 && a \/= Int64 && a \/= ByteString && ...
    +-- @
    +--
    +-- using an enumeration of all the possible 'TensorType's.
    +type OneOf ts a
    +    = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a)
    +
    +-- | A 'Constraint' checking that the input is a list of 'TensorType's.
    +-- Helps improve error messages when using 'OneOf'.
    +type family TensorTypes ts :: Constraint where
    +    TensorTypes '[] = ()
    +    TensorTypes (t ': ts) = (TensorType t, TensorTypes ts)
    +
    +-- | A constraint checking that two types are different.
    +type family a /= b :: Constraint where
    +    a /= a = TypeError a ~ ExcludedCase
    +    a /= b = ()
    +
    +-- | Helper types to produce a reasonable type error message when the Constraint
    +-- "a /= a" fails.
    +-- TODO(judahjacobson): Use ghc-8's CustomTypeErrors for this.
    +data TypeError a
    +data ExcludedCase
    +
    +-- | An enumeration of all valid 'TensorType's.
    +type AllTensorTypes =
    +    -- NOTE: This list should be kept in sync with
    +    -- TensorFlow.OpGen.dtTypeToHaskell.
    +    -- TODO: Add support for Complex Float/Double.
    +    '[ Float
    +     , Double
    +     , Int8
    +     , Int16
    +     , Int32
    +     , Int64
    +     , Word8
    +     , Word16
    +     , ByteString
    +     , Bool
    +     ]
    +
    +-- | Removes a type from the given list of types.
    +type family Delete a as where
    +    Delete a '[] = '[]
    +    Delete a (a ': as) = Delete a as
    +    Delete a (b ': as) = b ': Delete a as
    +
    +-- | Takes the difference of two lists of types.
    +type family as \\ bs where
    +    as \\ '[] = as
    +    as \\ b ': bs = Delete b as \\ bs
    +
    +-- | A constraint that the type @a@ doesn't appear in the type list @ts@.
    +-- Assumes that @a@ and each of the elements of @ts@ are 'TensorType's.
    +type family NoneOf ts a :: Constraint where
    +    NoneOf '[] a = ()
    +    NoneOf (t ': ts) a = (a /= t, NoneOf ts a)
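+-- As a sketch of how these families combine (illustrative only): for
+-- @OneOf '[Double, Float] a@, @AllTensorTypes \\ '[Double, Float]@ reduces
+-- to '[Int8, Int16, Int32, Int64, Word8, Word16, ByteString, Bool], so the
+-- constraint unfolds to @a /= Int8@, @a /= Int16@, ..., @a /= Bool@.
+-- Choosing @a@ from outside the allowed list then matches the @a /= a@
+-- equation and surfaces the unsatisfiable @TypeError a ~ ExcludedCase@.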
    +
+
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-0.1.0.0/src/hscolour.css
new file mode 100644
index 0000000..c15919e
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/src/hscolour.css
@@ -0,0 +1,5 @@
+.hs-keyglyph, .hs-layout {color: red;}
+.hs-keyword {color: blue;}
+.hs-comment, .hs-comment a {color: green;}
+.hs-str, .hs-chr {color: teal;}
+.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {}
diff --git a/docs/haddock/tensorflow-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-0.1.0.0/synopsis.png
new file mode 100644
index 0000000000000000000000000000000000000000..85fb86ec84907bcc86531dc82871948ff4d471fa
GIT binary patch
[binary patch data omitted (literal 11327)]
diff --git a/docs/haddock/tensorflow-0.1.0.0/tensorflow.txt b/docs/haddock/tensorflow-0.1.0.0/tensorflow.txt
new file mode 100644
index 0000000..73409d2
--- /dev/null
+++ b/docs/haddock/tensorflow-0.1.0.0/tensorflow.txt
@@ -0,0 +1,626 @@
+-- Hoogle documentation, generated by Haddock
+-- See Hoogle, http://www.haskell.org/hoogle/
+
+
+-- | TensorFlow bindings.
+--
+-- Please see README.md
+@package tensorflow
+@version 0.1.0.0
+
+
+-- | Originally taken from internal proto-lens code.
+module TensorFlow.Internal.VarInt
+
+-- | Decode an unsigned varint.
+getVarInt :: Parser Word64
+
+-- | Encode a Word64.
+putVarInt :: Word64 -> Builder
+
+module TensorFlow.Internal.FFI
+data TensorFlowException
+TensorFlowException :: Code -> Text -> TensorFlowException
+data Session
+
+-- | Runs the given action after creating a session with options populated
+-- by the given optionSetter.
+withSession :: (SessionOptions -> IO ()) -> ((IO () -> IO ()) -> Session -> IO a) -> IO a
+extendGraph :: Session -> GraphDef -> IO ()
+run :: Session -> [(ByteString, TensorData)] -> [ByteString] -> [ByteString] -> IO [TensorData]
+
+-- | All of the data needed to represent a tensor.
+data TensorData
+TensorData :: [Int64] -> !DataType -> !(Vector Word8) -> TensorData
+[tensorDataDimensions] :: TensorData -> [Int64]
+[tensorDataType] :: TensorData -> !DataType
+[tensorDataBytes] :: TensorData -> !(Vector Word8)
+setSessionConfig :: ConfigProto -> SessionOptions -> IO ()
+setSessionTarget :: ByteString -> SessionOptions -> IO ()
+
+-- | Returns the serialized OpList of all OpDefs defined in this address
+-- space.
+getAllOpList :: IO ByteString
+
+-- | Serializes the given msg and provides it as (ptr,len) argument to the
+-- given action.
+useProtoAsVoidPtrLen :: (Message msg, Num c) => msg -> (Ptr b -> c -> IO a) -> IO a
+instance GHC.Classes.Eq TensorFlow.Internal.FFI.TensorData
+instance GHC.Show.Show TensorFlow.Internal.FFI.TensorData
+instance GHC.Classes.Eq TensorFlow.Internal.FFI.TensorFlowException
+instance GHC.Show.Show TensorFlow.Internal.FFI.TensorFlowException
+instance GHC.Exception.Exception TensorFlow.Internal.FFI.TensorFlowException
+
+module TensorFlow.Types
+
+-- | The class of scalar types supported by tensorflow.
+class TensorType a
+tensorType :: TensorType a => a -> DataType
+tensorRefType :: TensorType a => a -> DataType
+tensorVal :: TensorType a => Lens' TensorProto [a]
+
+-- | Decode the bytes of a TensorData into a Vector.
+decodeTensorData :: TensorType a => TensorData a -> Vector a
+
+-- | Encode a Vector into a TensorData.
+--
+-- The values should be in row major order, e.g.,
+--
+-- element 0: index (0, ..., 0) element 1: index (0, ..., 1) ...
+encodeTensorData :: TensorType a => Shape -> Vector a -> TensorData a
+
+-- | Data about a tensor that is encoded for the TensorFlow APIs.
+newtype TensorData a
+TensorData :: TensorData -> TensorData a
+[unTensorData] :: TensorData a -> TensorData
+
+-- | Shape (dimensions) of a tensor.
+newtype Shape
+Shape :: [Int64] -> Shape
+protoShape :: Lens' TensorShapeProto Shape
+class Attribute a
+attrLens :: Attribute a => Lens' AttrValue a
+
+-- | A Constraint specifying the possible choices of a
+-- TensorType.
+--
+-- We implement a Constraint like OneOf '[Double, Float]
+-- a by turning the natural representation as a conjunction, i.e.,
+--
+--
    +--   a == Double || a == Float
    +--   
+--
+-- into a disjunction like
+--
+--
    +--   a /= Int32 && a /= Int64 && a /= ByteString && ...
    +--   
+--
+-- using an enumeration of all the possible TensorTypes.
+type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a)
+
+-- | A constraint checking that two types are different.
+
+-- | Helper types to produce a reasonable type error message when the
+-- Constraint "a /= a" fails. TODO(judahjacobson): Use ghc-8's
+-- CustomTypeErrors for this.
+data TypeError a
+data ExcludedCase
+
+-- | A Constraint checking that the input is a list of
+-- TensorTypes. Helps improve error messages when using
+-- OneOf.
+
+-- | A constraint that the type a doesn't appear in the type list
+-- ts. Assumes that a and each of the elements of
+-- ts are TensorTypes.
+
+-- | Takes the difference of two lists of types.
+
+-- | Removes a type from the given list of types.
+
+-- | An enumeration of all valid TensorTypes.
+type AllTensorTypes = '[Float, Double, Int8, Int16, Int32, Int64, Word8, Word16, ByteString, Bool]
+instance GHC.Show.Show TensorFlow.Types.Shape
+instance TensorFlow.Types.TensorType GHC.Types.Float
+instance TensorFlow.Types.TensorType GHC.Types.Double
+instance TensorFlow.Types.TensorType GHC.Int.Int32
+instance TensorFlow.Types.TensorType GHC.Int.Int64
+instance TensorFlow.Types.TensorType GHC.Word.Word8
+instance TensorFlow.Types.TensorType GHC.Word.Word16
+instance TensorFlow.Types.TensorType GHC.Int.Int16
+instance TensorFlow.Types.TensorType GHC.Int.Int8
+instance TensorFlow.Types.TensorType Data.ByteString.Internal.ByteString
+instance TensorFlow.Types.TensorType GHC.Types.Bool
+instance TensorFlow.Types.TensorType (Data.Complex.Complex GHC.Types.Float)
+instance TensorFlow.Types.TensorType (Data.Complex.Complex GHC.Types.Double)
+instance GHC.Exts.IsList TensorFlow.Types.Shape
+instance TensorFlow.Types.Attribute GHC.Types.Float
+instance TensorFlow.Types.Attribute Data.ByteString.Internal.ByteString
+instance TensorFlow.Types.Attribute GHC.Int.Int64
+instance TensorFlow.Types.Attribute Proto.Tensorflow.Core.Framework.Types.DataType
+instance TensorFlow.Types.Attribute Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance TensorFlow.Types.Attribute GHC.Types.Bool
+instance TensorFlow.Types.Attribute TensorFlow.Types.Shape
+instance TensorFlow.Types.Attribute Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue
+instance TensorFlow.Types.Attribute [Proto.Tensorflow.Core.Framework.Types.DataType]
+instance TensorFlow.Types.Attribute [GHC.Int.Int64]
+
+module TensorFlow.Output
+
+-- | A type of graph node which has no outputs. These nodes are valuable
+-- for causing side effects when they are run.
+newtype ControlNode
+ControlNode :: Op -> ControlNode
+[unControlNode] :: ControlNode -> Op
+
+-- | A device that a node can be assigned to. There's a naming convention
+-- where the device names are constructed from job and replica names.
+newtype Device
+Device :: Text -> Device
+[deviceName] :: Device -> Text
+
+-- | The name of a node in the graph. This corresponds to the proto field
+-- NodeDef.name. Includes the scope prefix (if any) and a unique
+-- identifier (if the node was implicitly named).
+newtype NodeName
+NodeName :: Text -> NodeName
+[unNodeName] :: NodeName -> Text
+
+-- | The representation of a node in a TensorFlow graph.
+data Op
+
+-- | Properties are fixed, including the device, name, and scope.
+Rendered :: !NodeDef -> Op
+
+-- | Properties are not fixed, and may change depending on which context
+-- this op is rendered in.
+Unrendered :: !OpDef -> Op
+
+-- | Traverse on the Unrendered of an Op.
+--
+-- Same implementation as _Left.
+opUnrendered :: Traversal' Op OpDef
+
+-- | Op definition. This corresponds somewhat to the NodeDef proto.
+data OpDef
+OpDef :: !PendingNodeName -> !OpType -> !(Map Text AttrValue) -> [Output] -> [NodeName] -> OpDef
+[_opName] :: OpDef -> !PendingNodeName
+[_opType] :: OpDef -> !OpType
+[_opAttrs] :: OpDef -> !(Map Text AttrValue)
+[_opInputs] :: OpDef -> [Output]
+[_opControlInputs] :: OpDef -> [NodeName]
+opName :: Lens' OpDef PendingNodeName
+opType :: Lens' OpDef OpType
+opAttr :: Attribute a => Text -> Lens' OpDef a
+opInputs :: Lens' OpDef [Output]
+opControlInputs :: Lens' OpDef [NodeName]
+
+-- | The type of op of a node in the graph. This corresponds to the proto
+-- field NodeDef.op.
+newtype OpType
+OpType :: Text -> OpType
+[unOpType] :: OpType -> Text
+newtype OutputIx
+OutputIx :: Int -> OutputIx
+[unOutputIx] :: OutputIx -> Int
+
+-- | An output of a TensorFlow node.
+data Output
+Output :: !OutputIx -> !Op -> Output
+output :: OutputIx -> Op -> Output
+outputIndex :: Lens' Output OutputIx
+outputOp :: Lens' Output Op
+
+-- | The name specified for an unrendered Op. If an Op has an ImplicitName,
+-- it will be assigned based on the opType plus a unique identifier. Does
+-- not contain the "scope" prefix.
+data PendingNodeName
+ExplicitName :: !Text -> PendingNodeName
+ImplicitName :: PendingNodeName
+instance GHC.Classes.Ord TensorFlow.Output.Op
+instance GHC.Classes.Eq TensorFlow.Output.Op
+instance GHC.Show.Show TensorFlow.Output.Output
+instance GHC.Classes.Ord TensorFlow.Output.Output
+instance GHC.Classes.Eq TensorFlow.Output.Output
+instance GHC.Classes.Ord TensorFlow.Output.OpDef
+instance GHC.Classes.Eq TensorFlow.Output.OpDef
+instance GHC.Show.Show TensorFlow.Output.NodeName
+instance GHC.Classes.Ord TensorFlow.Output.NodeName
+instance GHC.Classes.Eq TensorFlow.Output.NodeName
+instance GHC.Show.Show TensorFlow.Output.PendingNodeName
+instance GHC.Classes.Ord TensorFlow.Output.PendingNodeName
+instance GHC.Classes.Eq TensorFlow.Output.PendingNodeName
+instance Data.String.IsString TensorFlow.Output.Device
+instance GHC.Classes.Ord TensorFlow.Output.Device
+instance GHC.Classes.Eq TensorFlow.Output.Device
+instance GHC.Show.Show TensorFlow.Output.OutputIx
+instance GHC.Enum.Enum TensorFlow.Output.OutputIx
+instance GHC.Num.Num TensorFlow.Output.OutputIx
+instance GHC.Classes.Ord TensorFlow.Output.OutputIx
+instance GHC.Classes.Eq TensorFlow.Output.OutputIx
+instance GHC.Show.Show TensorFlow.Output.OpType
+instance GHC.Classes.Ord TensorFlow.Output.OpType
+instance GHC.Classes.Eq TensorFlow.Output.OpType
+instance Data.String.IsString TensorFlow.Output.OpType
+instance GHC.Show.Show TensorFlow.Output.Device
+instance GHC.Show.Show TensorFlow.Output.Op
+instance Data.String.IsString TensorFlow.Output.Output
+
+module TensorFlow.Tensor
+
+-- | A named output of a TensorFlow operation.
+--
+-- The type parameter a is the type of the elements in the
+-- Tensor. The parameter v is either Value or
+-- Ref, depending on whether the graph is treating this op output
+-- as an immutable Value or a stateful Ref (e.g., a
+-- variable). Note that a Tensor Ref can be cast into a
+-- Tensor Value via value.
+data Tensor v a
+Tensor :: (TensorKind v) -> Output -> Tensor v a
+data Value
+data Ref
+
+-- | This class provides a runtime switch on whether a Tensor should
+-- be treated as a Value or as a Ref.
+data TensorKind v
+ValueKind :: TensorKind Value
+RefKind :: TensorKind Ref
+tensorKind :: Lens' (Tensor v a) (TensorKind v)
+tensorOutput :: Lens' (Tensor v a) Output
+
+-- | Lens for the attributes of a tensor.
+--
+-- Only valid if the tensor has not yet been rendered. If the tensor has
+-- been rendered, the traversal will be over nothing (nothing can be read
+-- or written).
+tensorAttr :: Attribute attr => Text -> Traversal' (Tensor v a) attr
+
+-- | Cast a 'Tensor *' into a 'Tensor Value'. Common usage is to cast a Ref
+-- into Value. This behaves like a no-op.
+value :: Tensor v a -> Tensor Value a
+
+-- | A pair of a Tensor and some data that should be fed into that
+-- Tensor when running the graph.
+data Feed
+Feed :: Output -> TensorData -> Feed
+
+-- | Create a Feed for feeding the given data into a Tensor
+-- when running the graph.
+--
+-- Note that if a Tensor is rendered, its identity may change; so
+-- feeding the rendered Tensor may be different than feeding the
+-- original Tensor.
+feed :: Tensor v a -> TensorData a -> Feed
+
+-- | Create a Tensor for a given name. This can be used to reference
+-- nodes in a GraphDef that was loaded via addGraphDef.
+-- TODO(judahjacobson): add more safety checks here.
+tensorFromName :: TensorKind v -> Text -> Tensor v a
+
+module TensorFlow.Build
+
+-- | A type of graph node which has no outputs. These nodes are valuable
+-- for causing side effects when they are run.
+newtype ControlNode
+ControlNode :: Op -> ControlNode
+[unControlNode] :: ControlNode -> Op
+data Unique
+explicitName :: Text -> PendingNodeName
+implicitName :: PendingNodeName
+opDef :: OpType -> OpDef
+opDefWithName :: PendingNodeName -> OpType -> OpDef
+opName :: Lens' OpDef PendingNodeName
+opType :: Lens' OpDef OpType
+opAttr :: Attribute a => Text -> Lens' OpDef a
+opInputs :: Lens' OpDef [Output]
+opControlInputs :: Lens' OpDef [NodeName]
+data GraphState
+
+-- | Render a Tensor, fixing its name, scope, device and control
+-- inputs from the Build context. Also renders any dependencies of
+-- the Tensor that weren't already rendered.
+--
+-- This operation is idempotent; render >=> render ===
+-- render. However, rendering a (previously un-rendered)
+-- Tensor in two different contexts may result in two different
+-- Tensors.
+render :: Tensor v a -> Build (Tensor v a)
+
+-- | Render a Tensor and get its node's name.
+renderNodeName :: Tensor v a -> Build NodeName
+renderedNodeDefs :: Lens' GraphState (Map NodeName NodeDef)
+
+-- | An action for building nodes in a TensorFlow graph. Used to manage
+-- build state internally as part of the Session monad.
+data BuildT m a
+
+-- | An action for building nodes in a TensorFlow graph.
+type Build = BuildT Identity
+
+-- | Registers the given node to be executed before the next run.
+addInitializer :: ControlNode -> Build ()
+
+-- | This is Control.Monad.Morph.hoist sans the dependency.
+hoistBuildT :: (forall a. m a -> n a) -> BuildT m b -> BuildT n b
+evalBuildT :: Monad m => BuildT m a -> m a
+runBuildT :: BuildT m a -> m (a, GraphState)
+
+-- | Produce a GraphDef proto representation of the nodes that are rendered
+-- in the given Build action.
+asGraphDef :: Build a -> GraphDef
+addGraphDef :: GraphDef -> Build ()
+
+-- | Get all the initializers that have accumulated so far, and clear that
+-- buffer.
+flushInitializers :: Monad m => BuildT m [NodeName]
+
+-- | Get all the NodeDefs that have accumulated so far, and clear that
+-- buffer.
+flushNodeBuffer :: Monad m => BuildT m [NodeDef]
+
+-- | Render the given op if it hasn't been rendered already, and return its
+-- name.
+getOrAddOp :: Op -> Build NodeName
+
+-- | Add a new node for a given OpDef. This is used for making
+-- "stateful" ops which are not safe to dedup (e.g., "variable" and
+-- "assign").
+addNewOp :: OpDef -> Build NodeDef
+
+-- | Render an Output and return a string representation for the
+-- TensorFlow foreign APIs.
+renderOutput :: Output -> Build Text
+
+-- | Places all nodes rendered in the given Build action on the same
+-- device as the given Tensor (see also withDevice). Make sure
+-- that the action has side effects of rendering the desired tensors. A
+-- pure return would not have the desired effect.
+colocateWith :: Tensor v b -> Build a -> Build a
+
+-- | Modify some part of the state, run an action, and restore the state
+-- after that action is done.
+withStateLens :: MonadState s m => Lens' s a -> (a -> a) -> m b -> m b
+
+-- | Set a device for all nodes rendered in the given Build action
+-- (unless further overridden by another use of withDevice).
+withDevice :: Maybe Device -> Build a -> Build a
+
+-- | Prepend a scope to all nodes rendered in the given Build
+-- action.
+withNameScope :: Text -> Build a -> Build a
+
+-- | Add control inputs to all nodes rendered in the given Build
+-- action.
+withNodeDependencies :: Set NodeName -> Build a -> Build a
+
+-- | Records the given summary action in Build for retrieval with
+-- collectAllSummaries. The summary op is required to produce a
+-- Summary protocol buffer in string form. For safety, use the
+-- pre-composed functions: Logging.scalarSummary and
+-- Logging.histogramSummary.
+addSummary :: SummaryTensor -> Build ()
+
+-- | Synonym for the tensors that return serialized Summary proto.
+type SummaryTensor = Tensor Value ByteString
+
+-- | Retrieves the summary ops collected thus far. Typically this only
+-- happens once, but if buildWithSummary is used repeatedly, the
+-- values accumulate.
+collectAllSummaries :: Monad m => BuildT m [SummaryTensor]
+instance GHC.Base.Monad m => Control.Monad.State.Class.MonadState TensorFlow.Build.GraphState (TensorFlow.Build.BuildT m)
+instance Control.Monad.Trans.Class.MonadTrans TensorFlow.Build.BuildT
+instance Control.Monad.IO.Class.MonadIO m => Control.Monad.IO.Class.MonadIO (TensorFlow.Build.BuildT m)
+instance GHC.Base.Monad m => GHC.Base.Monad (TensorFlow.Build.BuildT m)
+instance GHC.Base.Monad m => GHC.Base.Applicative (TensorFlow.Build.BuildT m)
+instance GHC.Base.Functor m => GHC.Base.Functor (TensorFlow.Build.BuildT m)
+instance GHC.Classes.Ord TensorFlow.Build.PendingNode
+instance GHC.Classes.Eq TensorFlow.Build.PendingNode
+instance Data.String.IsString TensorFlow.Build.Scope
+instance GHC.Classes.Ord TensorFlow.Build.Scope
+instance GHC.Classes.Eq TensorFlow.Build.Scope
+instance GHC.Enum.Enum TensorFlow.Build.Unique
+instance GHC.Classes.Ord TensorFlow.Build.Unique
+instance GHC.Classes.Eq TensorFlow.Build.Unique
+instance GHC.Show.Show TensorFlow.Build.Scope
+
+module TensorFlow.BuildOp
+
+-- | Class of types that can be used as op outputs.
+class OpResult a
+
+-- | Class of types that can be used as op functions.
+class BuildOp f
+
+-- | Starts an operation that returns a structured set of tensors
+-- (singletons or tuples).
+buildOp :: BuildOp f => OpDef -> f
+
+-- | Starts an operation that returns a list of tensors.
+buildListOp :: BuildOp f => [Int64] -> OpDef -> f
+
+-- | Returns true if all the integers in each tuple are identical. Throws
+-- an error with a descriptive message if not.
+eqLengthGuard :: [(String, [(String, Int)])] -> Bool
+instance GHC.Show.Show TensorFlow.BuildOp.ResultState
+instance (TensorFlow.BuildOp.OpResult a1, TensorFlow.BuildOp.OpResult a2) => TensorFlow.BuildOp.OpResult (a1, a2)
+instance (TensorFlow.BuildOp.OpResult a1, TensorFlow.BuildOp.OpResult a2, TensorFlow.BuildOp.OpResult a3) => TensorFlow.BuildOp.OpResult (a1, a2, a3)
+instance (TensorFlow.BuildOp.OpResult a1, TensorFlow.BuildOp.OpResult a2, TensorFlow.BuildOp.OpResult a3, TensorFlow.BuildOp.OpResult a4) => TensorFlow.BuildOp.OpResult (a1, a2, a3, a4)
+instance (TensorFlow.BuildOp.OpResult a1, TensorFlow.BuildOp.OpResult a2, TensorFlow.BuildOp.OpResult a3, TensorFlow.BuildOp.OpResult a4, TensorFlow.BuildOp.OpResult a5) => TensorFlow.BuildOp.OpResult (a1, a2, a3, a4, a5)
+instance (TensorFlow.BuildOp.OpResult a1, TensorFlow.BuildOp.OpResult a2, TensorFlow.BuildOp.OpResult a3, TensorFlow.BuildOp.OpResult a4, TensorFlow.BuildOp.OpResult a5, TensorFlow.BuildOp.OpResult a6) => TensorFlow.BuildOp.OpResult (a1, a2, a3, a4, a5, a6)
+instance TensorFlow.BuildOp.OpResult (TensorFlow.Tensor.Tensor TensorFlow.Tensor.Value a)
+instance TensorFlow.BuildOp.OpResult (TensorFlow.Tensor.Tensor TensorFlow.Tensor.Ref a)
+instance TensorFlow.BuildOp.OpResult TensorFlow.Output.ControlNode
+instance TensorFlow.BuildOp.OpResult a => TensorFlow.BuildOp.OpResult [a]
+instance TensorFlow.BuildOp.BuildOp TensorFlow.Output.ControlNode
+instance TensorFlow.BuildOp.BuildOp (TensorFlow.Tensor.Tensor TensorFlow.Tensor.Value a)
+instance TensorFlow.BuildOp.BuildOp (TensorFlow.Tensor.Tensor TensorFlow.Tensor.Ref a)
+instance TensorFlow.BuildOp.BuildOp [TensorFlow.Tensor.Tensor TensorFlow.Tensor.Value a]
+instance (TensorFlow.BuildOp.OpResult t1, TensorFlow.BuildOp.OpResult t2) => TensorFlow.BuildOp.BuildOp (t1, t2)
+instance (TensorFlow.BuildOp.OpResult t1, TensorFlow.BuildOp.OpResult t2, TensorFlow.BuildOp.OpResult t3) => TensorFlow.BuildOp.BuildOp (t1, t2, t3)
+instance (TensorFlow.BuildOp.OpResult t1, TensorFlow.BuildOp.OpResult t2, TensorFlow.BuildOp.OpResult t3, TensorFlow.BuildOp.OpResult t4) => TensorFlow.BuildOp.BuildOp (t1, t2, t3, t4)
+instance (TensorFlow.BuildOp.OpResult t1, TensorFlow.BuildOp.OpResult t2, TensorFlow.BuildOp.OpResult t3, TensorFlow.BuildOp.OpResult t4, TensorFlow.BuildOp.OpResult t5) => TensorFlow.BuildOp.BuildOp (t1, t2, t3, t4, t5)
+instance (TensorFlow.BuildOp.OpResult t1, TensorFlow.BuildOp.OpResult t2, TensorFlow.BuildOp.OpResult t3, TensorFlow.BuildOp.OpResult t4, TensorFlow.BuildOp.OpResult t5, TensorFlow.BuildOp.OpResult t6) => TensorFlow.BuildOp.BuildOp (t1, t2, t3, t4, t5, t6)
+instance TensorFlow.BuildOp.OpResult a => TensorFlow.BuildOp.BuildOp (TensorFlow.Build.Build a)
+instance TensorFlow.BuildOp.BuildOp f => TensorFlow.BuildOp.BuildOp (TensorFlow.Tensor.Tensor v a -> f)
+instance TensorFlow.BuildOp.BuildOp f => TensorFlow.BuildOp.BuildOp ([TensorFlow.Tensor.Tensor v a] -> f)
+
+module TensorFlow.Nodes
+
+-- | Types that contain ops which can be run.
+class Nodes t
+getNodes :: Nodes t => t -> Build (Set NodeName)
+
+-- | Types that tensor representations (e.g. Tensor,
+-- ControlNode) can be fetched into.
+--
+-- Includes collections of tensors (e.g. tuples).
+class Nodes t => Fetchable t a
+getFetch :: Fetchable t a => t -> Build (Fetch a)
+
+-- | Fetch action. Keeps track of what needs to be fetched and how to
+-- decode the fetched data.
+data Fetch a
+Fetch :: Set Text -> (Map Text TensorData -> a) -> Fetch a
+
+-- | Nodes to fetch
+[fetches] :: Fetch a -> Set Text
+
+-- | Function to create an a from the fetched data.
+[fetchRestore] :: Fetch a -> Map Text TensorData -> a
+nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b
+fetchTensorList :: TensorType a => Tensor v a -> Build (Fetch (Shape, [a]))
+fetchTensorVector :: TensorType a => Tensor v a -> Build (Fetch (Shape, Vector a))
+newtype Scalar a
+Scalar :: a -> Scalar a
+[unScalar] :: Scalar a -> a
+instance Data.String.IsString a => Data.String.IsString (TensorFlow.Nodes.Scalar a)
+instance GHC.Real.RealFrac a => GHC.Real.RealFrac (TensorFlow.Nodes.Scalar a)
+instance GHC.Float.RealFloat a => GHC.Float.RealFloat (TensorFlow.Nodes.Scalar a)
+instance GHC.Real.Real a => GHC.Real.Real (TensorFlow.Nodes.Scalar a)
+instance GHC.Float.Floating a => GHC.Float.Floating (TensorFlow.Nodes.Scalar a)
+instance GHC.Real.Fractional a => GHC.Real.Fractional (TensorFlow.Nodes.Scalar a)
+instance GHC.Num.Num a => GHC.Num.Num (TensorFlow.Nodes.Scalar a)
+instance GHC.Classes.Ord a => GHC.Classes.Ord (TensorFlow.Nodes.Scalar a)
+instance GHC.Classes.Eq a => GHC.Classes.Eq (TensorFlow.Nodes.Scalar a)
+instance GHC.Show.Show a => GHC.Show.Show (TensorFlow.Nodes.Scalar a)
+instance GHC.Base.Functor TensorFlow.Nodes.Fetch
+instance GHC.Base.Applicative TensorFlow.Nodes.Fetch
+instance (TensorFlow.Nodes.Nodes t1, TensorFlow.Nodes.Nodes t2) => TensorFlow.Nodes.Nodes (t1, t2)
+instance (TensorFlow.Nodes.Nodes t1, TensorFlow.Nodes.Nodes t2, TensorFlow.Nodes.Nodes t3) => TensorFlow.Nodes.Nodes (t1, t2, t3)
+instance (TensorFlow.Nodes.Fetchable t1 a1, TensorFlow.Nodes.Fetchable t2 a2) => TensorFlow.Nodes.Fetchable (t1, t2) (a1, a2)
+instance (TensorFlow.Nodes.Fetchable t1 a1, TensorFlow.Nodes.Fetchable t2 a2, TensorFlow.Nodes.Fetchable t3 a3) => TensorFlow.Nodes.Fetchable (t1, t2, t3) (a1, a2, a3)
+instance TensorFlow.Nodes.Nodes t => TensorFlow.Nodes.Nodes [t]
+instance TensorFlow.Nodes.Fetchable t a => TensorFlow.Nodes.Fetchable [t] [a]
+instance TensorFlow.Nodes.Nodes TensorFlow.Output.ControlNode
+instance (a ~ ()) => TensorFlow.Nodes.Fetchable TensorFlow.Output.ControlNode a
+instance TensorFlow.Nodes.Nodes (TensorFlow.Tensor.Tensor v a)
+instance (TensorFlow.Types.TensorType a, a ~ a') => TensorFlow.Nodes.Fetchable (TensorFlow.Tensor.Tensor v a) (Data.Vector.Vector a')
+instance (TensorFlow.Types.TensorType a, a ~ a') => TensorFlow.Nodes.Fetchable (TensorFlow.Tensor.Tensor v a) (TensorFlow.Nodes.Scalar a')
+
+module TensorFlow.ControlFlow
+
+-- | Modify a Build action, such that all new ops rendered in it
+-- will depend on the nodes in the first argument.
+withControlDependencies :: Nodes t => t -> Build a -> Build a
+
+-- | Create an op that groups multiple operations.
+--
+-- When this op finishes, all ops in the input n have finished.
+-- This op has no output.
+group :: Nodes t => t -> Build ControlNode
+
+-- | Returns a Tensor with the same shape and contents as the input.
+identity :: TensorType a => Tensor v a -> Tensor v a
+
+-- | Does nothing. Only useful as a placeholder for control edges.
+noOp :: ControlNode
+
+-- | Returns a Tensor with a given name and the same shape and
+-- contents as the input.
+--
+-- TODO(judahjacobson): This breaks when used with uninitialized
+-- Tensor Refs, since RefIdentity doesn't have
+-- SetAllowsUninitializedInput(). Look into whether we can change that
+-- op.
+named :: TensorType a => Text -> Tensor v a -> Tensor v a
+
+module TensorFlow.Session
+data Session a
+
+-- | Setting of an option for the session (see
+-- runSessionWithOptions).
+data SessionOption
+
+-- | Uses the specified config for the created session.
+sessionConfig :: ConfigProto -> SessionOption
+
+-- | Target can be: "local", ip:port, host:port. The set of supported
+-- factories depends on the linked in libraries. REQUIRES
+-- "/learningbrain/public:tensorflow_remote" dependency for the
+-- binary.
+sessionTarget :: ByteString -> SessionOption
+
+-- | Run Session actions in a new TensorFlow session.
+runSession :: Session a -> IO a
+
+-- | Run Session actions in a new TensorFlow session created with
+-- the given option setter actions (sessionTarget,
+-- sessionConfig).
+runSessionWithOptions :: [SessionOption] -> Session a -> IO a
+
+-- | Lift a Build action into a Session, including any
+-- explicit op renderings.
+build :: Build a -> Session a
+
+-- | Helper combinator for doing something with the result of a
+-- Build action. Example usage:
+--
+--
    +--   buildAnd run :: Fetchable t a => Build t -> Session a
    +--   
+buildAnd :: (a -> Session b) -> Build a -> Session b
+
+-- | Lift a Build action into a Session, including any
+-- explicit op renderings. Returns the merged summary ops which can be
+-- used for logging, see build for a convenient wrapper.
+buildWithSummary :: Build a -> Session (a, [SummaryTensor])
+
+-- | Adds all pending rendered nodes to the TensorFlow graph and runs any
+-- pending initializers.
+--
+-- Note that run, runWithFeeds, etc. will all call this function
+-- implicitly.
+extend :: Session ()
+addGraphDef :: GraphDef -> Build ()
+
+-- | Run a subgraph t, rendering any dependent nodes that aren't
+-- already rendered, and fetch the corresponding values for a.
+run :: Fetchable t a => t -> Session a
+
+-- | Run a subgraph t, rendering any dependent nodes that aren't
+-- already rendered, feed the given input values, and fetch the
+-- corresponding result values for a.
+runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a
+
+-- | Run a subgraph t, rendering and extending any dependent nodes
+-- that aren't already rendered. This behaves like run except that
+-- it doesn't do any fetches.
+run_ :: Nodes t => t -> Session ()
+
+-- | Run a subgraph t, rendering any dependent nodes that aren't
+-- already rendered, and feed the given input values. This behaves like
+-- runWithFeeds except that it doesn't do any fetches.
+runWithFeeds_ :: Nodes t => [Feed] -> t -> Session ()
+
+-- | Starts a concurrent thread which evaluates the given Nodes forever
+-- until runSession exits or an exception occurs. Graph extension happens
+-- synchronously, but the resultant run proceeds as a separate thread.
+asyncProdNodes :: Nodes t => t -> Session ()
+instance Control.Monad.IO.Class.MonadIO TensorFlow.Session.Session
+instance GHC.Base.Monad TensorFlow.Session.Session
+instance GHC.Base.Applicative TensorFlow.Session.Session
+instance GHC.Base.Functor TensorFlow.Session.Session
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html
new file mode 100644
index 0000000..1b18c8d
--- /dev/null
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html
@@ -0,0 +1,2059 @@
+TensorFlow.GenOps.Core

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.GenOps.Core

    Synopsis

    Documentation

    _HostRecv Source

    Arguments

    :: TensorType tensor_type 
    => Int64

    send_device_incarnation: The current incarnation of send_device.

    -> Tensor Value tensor_type

    tensor: The tensor to receive.

    Receives the named tensor from send_device on recv_device.

_HostRecv requires its input on host memory whereas _Recv requires its input on device memory.

    _Recv Source

    Arguments

    :: TensorType tensor_type 
    => Int64

    send_device_incarnation: The current incarnation of send_device.

    -> Tensor Value tensor_type

    tensor: The tensor to receive.

    Receives the named tensor from send_device on recv_device.

    _Send Source

    Arguments

    :: TensorType t 
    => Int64

    send_device_incarnation: The current incarnation of send_device.

    -> Tensor v1 t

    tensor: The tensor to send.

    -> ControlNode 

    Sends the named tensor from send_device to recv_device.

    _Arg Source

    Arguments

    :: TensorType t 
    => Int64

    index: This argument is the index-th argument of the function.

    -> Tensor Value t

    output: The argument.

    A graph node which represents an argument to a function.

    sparseApplyRMSProp Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    ms: Should be from a Variable().

    -> Tensor v3 t

    mom: Should be from a Variable().

    -> Tensor v4 t

    lr: Scaling factor. Must be a scalar.

    -> Tensor v5 t

    rho: Decay rate. Must be a scalar.

    -> Tensor v6 t

    momentum

    -> Tensor v7 t

    epsilon: Ridge term. Must be a scalar.

    -> Tensor v8 t

    grad: The gradient.

    -> Tensor v9 tindices

    indices: A vector of indices into the first dimension of var, ms and mom.

    -> Tensor Value t

    out: Same as "var".

    Update '*var' according to the RMSProp algorithm.

Note that in the dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2
Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom

    applyAdam Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    m: Should be from a Variable().

    -> Tensor v3 t

    v: Should be from a Variable().

    -> Tensor v4 t

    beta1_power: Must be a scalar.

    -> Tensor v5 t

    beta2_power: Must be a scalar.

    -> Tensor v6 t

    lr: Scaling factor. Must be a scalar.

    -> Tensor v7 t

    beta1: Momentum factor. Must be a scalar.

    -> Tensor v8 t

    beta2: Momentum factor. Must be a scalar.

    -> Tensor v9 t

    epsilon: Ridge term. Must be a scalar.

    -> Tensor v10 t

    grad: The gradient.

    -> Tensor Value t

    out: Same as "var".

    Update '*var' according to the Adam algorithm.

lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
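As an illustrative computation (not from the original docs): with learning_rate = 0.001, beta1 = 0.9, beta2 = 0.999 and t = 1, the effective rate is lr_t = 0.001 * sqrt(1 - 0.999) / (1 - 0.9) = 0.001 * sqrt(0.001) / 0.1 ≈ 3.16e-4.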

    sparseApplyMomentum Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    accum: Should be from a Variable().

    -> Tensor v3 t

    lr: Learning rate. Must be a scalar.

    -> Tensor v4 t

    grad: The gradient.

    -> Tensor v5 tindices

    indices: A vector of indices into the first dimension of var and accum.

    -> Tensor v6 t

    momentum: Momentum. Must be a scalar.

    -> Tensor Value t

    out: Same as "var".

    Update relevant entries in '*var' and '*accum' according to the momentum scheme.

    Set use_nesterov = True if you want to use Nesterov momentum.

That is, for rows for which we have grad, we update var and accum as follows:

accum = accum * momentum + grad
var -= lr * accum

    applyMomentum Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    accum: Should be from a Variable().

    -> Tensor v3 t

    lr: Scaling factor. Must be a scalar.

    -> Tensor v4 t

    grad: The gradient.

    -> Tensor v5 t

    momentum: Momentum. Must be a scalar.

    -> Tensor Value t

    out: Same as "var".

Update '*var' according to the momentum scheme. Set use_nesterov = True if you want to use Nesterov momentum.

accum = accum * momentum + grad
var -= lr * accum

    applyFtrl Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    accum: Should be from a Variable().

    -> Tensor v3 t

    linear: Should be from a Variable().

    -> Tensor v4 t

    grad: The gradient.

    -> Tensor v5 t

    lr: Scaling factor. Must be a scalar.

    -> Tensor v6 t

l1: L1 regularization. Must be a scalar.

    -> Tensor v7 t

l2: L2 regularization. Must be a scalar.

    -> Tensor v8 t

    lr_power: Scaling factor. Must be a scalar.

    -> Tensor Value t

    out: Same as "var".

    Update '*var' according to the Ftrl-proximal scheme.

accum_new = accum + grad * grad
linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new

    sparseApplyAdagradDA Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    gradient_accumulator: Should be from a Variable().

    -> Tensor v3 t

    gradient_squared_accumulator: Should be from a Variable().

    -> Tensor v4 t

    grad: The gradient.

    -> Tensor v5 tindices

    indices: A vector of indices into the first dimension of var and accum.

    -> Tensor v6 t

    lr: Learning rate. Must be a scalar.

    -> Tensor v7 t

    l1: L1 regularization. Must be a scalar.

    -> Tensor v8 t

    l2: L2 regularization. Must be a scalar.

    -> Tensor v9 Int64

    global_step: Training step number. Must be a scalar.

    -> Tensor Value t

    out: Same as "var".

    Update entries in '*var' and '*accum' according to the proximal adagrad scheme.

    sparseApplyAdagrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    accum: Should be from a Variable().

    -> Tensor v3 t

    lr: Learning rate. Must be a scalar.

    -> Tensor v4 t

    grad: The gradient.

    -> Tensor v5 tindices

    indices: A vector of indices into the first dimension of var and accum.

    -> Tensor Value t

    out: Same as "var".

    Update relevant entries in '*var' and '*accum' according to the adagrad scheme.

That is, for rows for which we have grad, we update var and accum as follows:
accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))

    applyProximalAdagrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    accum: Should be from a Variable().

    -> Tensor v3 t

    lr: Scaling factor. Must be a scalar.

    -> Tensor v4 t

    l1: L1 regularization. Must be a scalar.

    -> Tensor v5 t

    l2: L2 regularization. Must be a scalar.

    -> Tensor v6 t

    grad: The gradient.

    -> Tensor Value t

    out: Same as "var".

    Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.

accum += grad * grad
prox_v = var - lr * grad * (1 / sqrt(accum))
var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

    applyAdagrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    accum: Should be from a Variable().

    -> Tensor v3 t

    lr: Scaling factor. Must be a scalar.

    -> Tensor v4 t

    grad: The gradient.

    -> Tensor Value t

    out: Same as "var".

    Update '*var' according to the adagrad scheme.

accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))

    applyAdadelta Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    accum: Should be from a Variable().

    -> Tensor v3 t

    accum_update: Should be from a Variable().

    -> Tensor v4 t

    lr: Scaling factor. Must be a scalar.

    -> Tensor v5 t

    rho: Decay factor. Must be a scalar.

    -> Tensor v6 t

    epsilon: Constant factor. Must be a scalar.

    -> Tensor v7 t

    grad: The gradient.

    -> Tensor Value t

    out: Same as "var".

    Update '*var' according to the adadelta scheme.

accum = rho() * accum + (1 - rho()) * grad.square();
update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
update_accum = rho() * update_accum + (1 - rho()) * update.square();
var -= update;

    sparseApplyProximalGradientDescent Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    alpha: Scaling factor. Must be a scalar.

    -> Tensor v3 t

    l1: L1 regularization. Must be a scalar.

    -> Tensor v4 t

    l2: L2 regularization. Must be a scalar.

    -> Tensor v5 t

    grad: The gradient.

    -> Tensor v6 tindices

    indices: A vector of indices into the first dimension of var and accum.

    -> Tensor Value t

    out: Same as "var".

    Sparse update '*var' as FOBOS algorithm with fixed learning rate.

That is, for rows for which we have grad, we update var as follows:
prox_v = var - alpha * grad
var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

    applyProximalGradientDescent Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    alpha: Scaling factor. Must be a scalar.

    -> Tensor v3 t

    l1: L1 regularization. Must be a scalar.

    -> Tensor v4 t

    l2: L2 regularization. Must be a scalar.

    -> Tensor v5 t

    delta: The change.

    -> Tensor Value t

    out: Same as "var".

    Update '*var' as FOBOS algorithm with fixed learning rate.

prox_v = var - alpha * delta
var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

    encodeBase64 Source

    Arguments

    :: Tensor v1 ByteString

    input: Strings to be encoded.

    -> Tensor Value ByteString

    output: Input strings encoded in base64.

    Encode strings into web-safe base64 format.

Refer to the following article for more information on the base64 format: en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the end so that the encoded string has a length that is a multiple of 4. See the Padding section of the link above.

    Web-safe means that the encoder uses - and _ instead of + and /.
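For example (illustrative, not from the original docs): the string "hello" encodes to "aGVsbG8=", where the single '=' pads the encoded length to a multiple of 4.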

    stringSplit Source

    Arguments

    :: Tensor v1 ByteString

    input: 1-D. Strings to split.

    -> Tensor v2 ByteString

    delimiter: 0-D. Delimiter character, or empty string.

    -> (Tensor Value Int64, Tensor Value ByteString, Tensor Value Int64)

    (indices, values, shape)

    • indices: A dense matrix of int64 representing the indices of the sparse tensor.
• values: A vector of strings corresponding to the split values.
• shape: a length-2 vector of int64 representing the shape of the sparse tensor, where the first value is N and the second value is the maximum number of tokens in a single input entry.

    Split elements of input based on delimiter into a SparseTensor.

Let N be the size of source (typically N will be the batch size). Split each element of input based on delimiter and return a SparseTensor containing the split tokens. Empty tokens are ignored.

delimiter can be empty or a single character. If delimiter is an empty string, each element of input is split into individual single-character strings.

For example: if N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output will be

indices = [0, 0;
           0, 1;
           1, 0;
           1, 1;
           1, 2]
shape = [2, 3]
values = [hello, world, a, b, c]

    stringJoin Source

    Arguments

    :: [Tensor v1 ByteString]

inputs: A list of string tensors. The tensors must all have the same shape, or be scalars. Scalars may be mixed in; these will be broadcast to the shape of non-scalar inputs.

    -> Tensor Value ByteString

    output

Joins the strings in the given list of string tensors into one tensor, with the given separator (default is an empty separator).

    asString Source

    Arguments

    :: (TensorType t, OneOf `[Complex Float, Bool, Int32, Int64, Int8, Double, Float]` t) 
    => Tensor v1 t

    input

    -> Tensor Value ByteString

    output

Converts each entry in the given tensor to strings. Supports many numeric types and boolean.

    stringToHashBucketStrong Source

    Arguments

    :: Int64

    num_buckets: The number of buckets.

    -> Tensor v1 ByteString

    input: The strings to assign a hash bucket.

    -> Tensor Value Int64

    output: A Tensor of the same shape as the input string_tensor.

    Converts each string in the input Tensor to its hash mod by a number of buckets.

The hash function is deterministic on the content of the string within the process. The hash function is a keyed hash function, where attribute key defines the key of the hash function. key is an array of 2 elements.

A strong hash is important when inputs may be malicious, e.g. URLs with additional components. Adversaries could try to make their inputs hash to the same bucket for a denial-of-service attack or to skew the results. A strong hash prevents this by making it difficult, if not infeasible, to compute inputs that hash to the same bucket. This comes at a cost of roughly 4x higher compute time than tf.string_to_hash_bucket_fast.

    scatterMul Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    ref: Should be from a Variable node.

    -> Tensor v2 tindices

    indices: A tensor of indices into the first dimension of ref.

    -> Tensor v3 t

    updates: A tensor of updated values to multiply to ref.

    -> Tensor Value t

    output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

    Multiplies sparse updates into a variable reference.

    This operation computes

        # Scalar indices
        ref[indices, ...] *= updates[...]

        # Vector indices (for each i)
        ref[indices[i], ...] *= updates[i, ...]

        # High rank indices (for each i, ..., j)
        ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]

    This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

    Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions multiply.

    Requires `updates.shape = indices.shape + ref.shape[1:]`.
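    A rough list-based sketch of the scatter semantics shared by scatterMul, scatterSub, and scatterAdd; scatterWith is our own helper, not part of the generated API. Because the combining function is applied once per index occurrence, duplicate indices compose their contributions exactly as described above:

    ```haskell
    import Data.List (foldl')

    -- scatterWith (*) mirrors ScatterMul for vector indices; (+) gives
    -- ScatterAdd and (-) gives ScatterSub. ref is a flat vector here.
    scatterWith :: (a -> a -> a) -> [a] -> [(Int, a)] -> [a]
    scatterWith f = foldl' step
      where
        step ref (i, u) = [ if j == i then f x u else x
                          | (j, x) <- zip [0 ..] ref ]

    -- scatterWith (*) [1, 2, 3] [(0, 10), (0, 10)] ==> [100, 2, 3]
    -- (the two contributions to index 0 multiply, as documented)
    ```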

    reduceJoin Source

    Arguments

    :: Tensor v1 ByteString

    inputs: The input to be joined. All reduced indices must have non-zero size.

    -> Tensor v2 Int32

    reduction_indices: The dimensions to reduce over. Dimensions are reduced in the order specified. Omitting reduction_indices is equivalent to passing `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.

    -> Tensor Value ByteString

    output: Has shape equal to that of the input with reduced dimensions removed or set to `1` depending on keep_dims.

    Joins a string Tensor across the given dimensions.

    Computes the string join across dimensions in the given string Tensor of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input strings with the given separator (default: empty string). Negative indices are counted backwards from the end, with `-1` being equivalent to `n - 1`. Passing an empty reduction_indices joins all strings in linear index order and outputs a scalar string.

    For example:

    ```
    # tensor a is [["a", "b"], ["c", "d"]]
    tf.reduce_join(a, 0) ==> ["ac", "bd"]
    tf.reduce_join(a, 1) ==> ["ab", "cd"]
    tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
    tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
    tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
    tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
    tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
    tf.reduce_join(a, [0, 1]) ==> ["acbd"]
    tf.reduce_join(a, [1, 0]) ==> ["abcd"]
    tf.reduce_join(a, []) ==> ["abcd"]
    ```

    scatterSub Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    ref: Should be from a Variable node.

    -> Tensor v2 tindices

    indices: A tensor of indices into the first dimension of ref.

    -> Tensor v3 t

    updates: A tensor of updated values to subtract from ref.

    -> Tensor Value t

    output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

    Subtracts sparse updates from a variable reference.

        # Scalar indices
        ref[indices, ...] -= updates[...]

        # Vector indices (for each i)
        ref[indices[i], ...] -= updates[i, ...]

        # High rank indices (for each i, ..., j)
        ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]

    This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

    Duplicate entries are handled correctly: if multiple indices reference the same location, their (negated) contributions add.

    Requires `updates.shape = indices.shape + ref.shape[1:]`.


    scatterAdd Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    ref: Should be from a Variable node.

    -> Tensor v2 tindices

    indices: A tensor of indices into the first dimension of ref.

    -> Tensor v3 t

    updates: A tensor of updated values to add to ref.

    -> Tensor Value t

    output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

    Adds sparse updates to a variable reference.

    This operation computes

    # Scalar indices
        ref[indices, ...] += updates[...]

        # Vector indices (for each i)
        ref[indices[i], ...] += updates[i, ...]

        # High rank indices (for each i, ..., j)
        ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]

    This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

    Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions add.

    Requires `updates.shape = indices.shape + ref.shape[1:]`.


    scatterUpdate Source

    Arguments

    :: (TensorType t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    ref: Should be from a Variable node.

    -> Tensor v2 tindices

    indices: A tensor of indices into the first dimension of ref.

    -> Tensor v3 t

    updates: A tensor of updated values to store in ref.

    -> Tensor Value t

    output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

    Applies sparse updates to a variable reference.

    This operation computes

    # Scalar indices
        ref[indices, ...] = updates[...]

        # Vector indices (for each i)
        ref[indices[i], ...] = updates[i, ...]

        # High rank indices (for each i, ..., j)
        ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]

    This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

    If values in ref are to be updated more than once, because there are duplicate entries in indices, the order in which the updates happen for each value is undefined.

    Requires `updates.shape = indices.shape + ref.shape[1:]`.


    assignSub Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    ref: Should be from a Variable node.

    -> Tensor v2 t

    value: The value to be subtracted from the variable.

    -> Tensor Value t

    output_ref: Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been updated.

    Update ref by subtracting value from it.

    This operation outputs "ref" after the update is done. This makes it easier to chain operations that need to use the reset value.

    assignAdd Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    ref: Should be from a Variable node.

    -> Tensor v2 t

    value: The value to be added to the variable.

    -> Tensor Value t

    output_ref: Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been updated.

    Update ref by adding value to it.

    This operation outputs "ref" after the update is done. This makes it easier to chain operations that need to use the reset value.

    sparseSegmentMeanGrad Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    grad: gradient propagated to the SparseSegmentMean op.

    -> Tensor v2 tidx

    indices: indices passed to the corresponding SparseSegmentMean op.

    -> Tensor v3 Int32

    segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.

    -> Tensor v4 Int32

    output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.

    -> Tensor Value t

    output

    Computes gradients for SparseSegmentMean.

    Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0.

    sparseSoftmax Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 Int64

    sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a SparseTensor, in canonical ordering.

    -> Tensor v2 t

    sp_values: 1-D. NNZ non-empty values corresponding to sp_indices.

    -> Tensor v3 Int64

    sp_shape: 1-D. Shape of the input SparseTensor.

    -> Tensor Value t

    output: 1-D. The NNZ values for the result SparseTensor.

    Applies softmax to a batched N-D SparseTensor.

    The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` (where `N >= 2`), and with indices sorted in the canonical lexicographic order.

    This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost logical submatrix with shape `[B, C]`, but with the catch that *the implicitly zero elements do not participate*. Specifically, the algorithm is equivalent to the following (sketched in code after this list):

    1. Applies `tf.nn.softmax()` to a densified view of each innermost submatrix with shape `[B, C]`, along the size-C dimension;
    2. Masks out the original implicitly-zero locations;
    3. Renormalizes the remaining elements.

    Hence, the SparseTensor result has exactly the same non-zero indices and shape.
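    Since masking and renormalizing the implicit zeros is the same as taking softmax over only the stored values of each row, the per-row effect can be sketched in plain Haskell:

    ```haskell
    -- sparseRowSoftmax is our own name; the row of stored values is
    -- assumed non-empty. Max-subtraction is for numerical stability.
    sparseRowSoftmax :: [Double] -> [Double]
    sparseRowSoftmax vs = map (/ total) exps
      where
        m     = maximum vs
        exps  = map (\v -> exp (v - m)) vs
        total = sum exps

    -- sparseRowSoftmax [1, 2] ==> [0.268..., 0.731...]
    ```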

    matrixSolve Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    matrix: Shape is `[..., M, M]`.

    -> Tensor v2 t

    rhs: Shape is `[..., M, K]`.

    -> Tensor Value t

    output: Shape is `[..., M, K]`.

    Solves systems of linear equations.

    Matrix is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. Rhs is a tensor of shape `[..., M, K]`. The output is a tensor of shape `[..., M, K]`. If adjoint is False then each output matrix satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If adjoint is True then each output matrix satisfies `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.

    selfAdjointEigV2 Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input: Tensor input of shape `[N, N]`.

    -> (Tensor Value t, Tensor Value t)

    (e, v)

    • e: Eigenvalues. Shape is `[N]`.
    • v: Eigenvectors. Shape is `[N, N]`.

    Computes the eigen decomposition of one or more square self-adjoint matrices.

    Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in input such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.

    ```prettyprint
    # a is a tensor.
    # e is a tensor of eigenvalues.
    # v is a tensor of eigenvectors.
    e, v = self_adjoint_eig(a)
    e = self_adjoint_eig(a, compute_v=False)
    ```

    selfAdjointEig Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input: Shape is `[..., M, M]`.

    -> Tensor Value t

    output: Shape is `[..., M+1, M]`.

    Computes the Eigen Decomposition of a batch of square self-adjoint matrices.

    The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices, with the same constraints as the single matrix SelfAdjointEig.

    The result is a `[..., M+1, M]` matrix with `[..., 0, :]` containing the eigenvalues, and subsequent `[..., 1:, :]` containing the eigenvectors.

    applyGradientDescent Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    alpha: Scaling factor. Must be a scalar.

    -> Tensor v3 t

    delta: The change.

    -> Tensor Value t

    out: Same as "var".

    Update *var* by subtracting `alpha * delta` from it.
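    The update rule is small enough to transcribe directly; a plain-Haskell sketch over flat vectors (not the graph op itself, which updates the variable in place):

    ```haskell
    -- Sketch of out = var - alpha * delta over flat vectors.
    gradientDescentStep :: Double -> [Double] -> [Double] -> [Double]
    gradientDescentStep alpha var delta = zipWith step var delta
      where step v d = v - alpha * d

    -- gradientDescentStep 0.1 [1.0, 2.0] [10.0, 0.0] ==> [0.0, 2.0]
    ```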

    stackPush Source

    Arguments

    :: TensorType t 
    => Tensor v1 ByteString

    handle: The handle to a stack.

    -> Tensor v2 t

    elem: The tensor to be pushed onto the stack.

    -> Tensor Value t

    output: The same tensor as the input elem.

    Push an element onto the stack.

    cholesky Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input: Shape is `[..., M, M]`.

    -> Tensor Value t

    output: Shape is `[..., M, M]`.

    Computes the Cholesky decomposition of one or more square matrices.

    The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices, with the same constraints as the single matrix Cholesky decomposition above. The output is a tensor of the same shape as the input containing the Cholesky decompositions for all input submatrices `[..., :, :]`.

    dynamicStitch Source

    Arguments

    :: TensorType t 
    => [Tensor v1 Int32]

    indices

    -> [Tensor v2 t]

    data

    -> Tensor Value t

    merged

    Interleave the values from the `data` tensors into a single tensor.

    Builds a merged tensor such that

    merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]

    For example, if each `indices[m]` is scalar or vector, we have

    # Scalar indices
        merged[indices[m], ...] = data[m][...]

        # Vector indices
        merged[indices[m][i], ...] = data[m][i, ...]

    Each `data[i].shape` must start with the corresponding `indices[i].shape`, and the rest of `data[i].shape` must be constant w.r.t. i. That is, we must have `data[i].shape = indices[i].shape + constant`. In terms of this constant, the output shape is

        merged.shape = [max(indices)] + constant

    Values are merged in order, so if an index appears in both `indices[m][i]` and `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the merged result.

    For example:

        indices[0] = 6
        indices[1] = [4, 1]
        indices[2] = [[5, 2], [0, 3]]
        data[0] = [61, 62]
        data[1] = [[41, 42], [11, 12]]
        data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
        merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
                  [51, 52], [61, 62]]

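    A list-based sketch of the merge semantics, assuming scalar data values and vector indices that cover 0..max(indices); dynamicStitch here is our own model, not the generated op:

    ```haskell
    import qualified Data.Map.Strict as M

    -- Data.Map's fromList keeps the last value per key, matching the
    -- documented in-order merging for duplicate indices.
    dynamicStitch :: [[Int]] -> [[a]] -> [a]
    dynamicStitch indices xs =
      M.elems (M.fromList (concat (zipWith zip indices xs)))

    -- dynamicStitch [[6], [4, 1], [5, 2, 0, 3]]
    --               [[61], [41, 11], [51, 21, 1, 31]]
    --   ==> [1, 11, 21, 31, 41, 51, 61]
    ```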

    readerNumWorkUnitsCompleted Source

    Arguments

    :: Tensor v1 ByteString

    reader_handle: Handle to a Reader.

    -> Tensor Value Int64

    units_completed

    Returns the number of work units this Reader has finished processing.

    readerRead Source

    Arguments

    :: Tensor v1 ByteString

    reader_handle: Handle to a Reader.

    -> Tensor v2 ByteString

    queue_handle: Handle to a Queue, with string work items.

    -> (Tensor Value ByteString, Tensor Value ByteString)

    (key, value)

    • key: A scalar.
    • value: A scalar.

    Returns the next record (key, value pair) produced by a Reader.

    Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file).

    fFT2D Source

    Arguments

    :: Tensor v1 (Complex Float)

    input: A complex64 tensor.

    -> Tensor Value (Complex Float)

    output: A complex64 tensor of the same shape as input. The inner-most 2 dimensions of input are replaced with their 2D Fourier Transform.

    Compute the 2-dimensional discrete Fourier Transform over the inner-most 2 dimensions of input.

    fixedLengthRecordReader Source

    Arguments

    :: Int64

    record_bytes

    -> Tensor Value ByteString

    reader_handle: The handle to reference the Reader.

    A Reader that outputs fixed-length records from a file.

    placeholder Source

    Arguments

    :: TensorType dtype 
    => Tensor Value dtype

    output: A placeholder tensor that must be replaced using the feed mechanism.

    A placeholder op for a value that will be fed into the computation.

    N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to provide attrs that enable the fed value to be checked at runtime.

    scalarSummary Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 ByteString

    tags: Tags for the summary.

    -> Tensor v2 t

    values: Same shape as `tags`. Values for the summary.

    -> Tensor Value ByteString

    summary: Scalar. Serialized Summary protocol buffer.

    Outputs a Summary protocol buffer with scalar values.

    The input tags and values must have the same shape. The generated summary has a summary value for each tag-value pair in tags and values.

    softmax Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    logits: 2-D with shape `[batch_size, num_classes]`.

    -> Tensor Value t

    softmax: Same shape as logits.

    Computes softmax activations.

    For each batch i and class j we have

    softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))

    shardedFilename Source

    Arguments

    :: Tensor v1 ByteString

    basename

    -> Tensor v2 Int32

    shard

    -> Tensor v3 Int32

    num_shards

    -> Tensor Value ByteString

    filename

    Generate a sharded filename. The filename is printf formatted as `%s-%05d-of-%05d, basename, shard, num_shards`.
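    The format string can be checked host-side with Text.Printf; a sketch (the helper name is ours, the op computes this inside the graph):

    ```haskell
    import Text.Printf (printf)

    shardedFilename :: String -> Int -> Int -> String
    shardedFilename basename shard numShards =
      printf "%s-%05d-of-%05d" basename shard numShards

    -- shardedFilename "train" 3 100 ==> "train-00003-of-00100"
    ```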

    _HostSend Source

    Arguments

    :: TensorType t 
    => Int64

    send_device_incarnation: The current incarnation of send_device.

    -> Tensor v1 t

    tensor: The tensor to send.

    -> ControlNode 

    Sends the named tensor from send_device to recv_device.

    _HostSend requires its input on host memory whereas _Send requires its input on device memory.

    sigmoidGrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Computes the gradient of the sigmoid of x wrt its input.

    Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and dy is the corresponding input gradient.
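    Transcribing the formula element-wise (a sketch, not the graph op):

    ```haskell
    sigmoidGrad :: Double -> Double -> Double
    sigmoidGrad x dy = dy * y * (1 - y)
      where y = 1 / (1 + exp (negate x))

    -- sigmoidGrad 0 1 ==> 0.25   (sigmoid 0 = 0.5, and 0.5 * 0.5 = 0.25)
    ```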

    nonMaxSuppression Source

    Arguments

    :: Tensor v1 Float

    boxes: A 2-D float tensor of shape `[num_boxes, 4]`.

    -> Tensor v2 Float

    scores: A 1-D float tensor of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes).

    -> Tensor v3 Int32

    max_output_size: A scalar integer tensor representing the maximum number of boxes to be selected by non max suppression.

    -> Tensor Value Int32

    selected_indices: A 1-D integer tensor of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`.

    Greedily selects a subset of bounding boxes in descending order of score, pruning away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners, and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system and is invariant to orthogonal transformations and translations of it; thus translating or reflecting the coordinate system results in the same boxes being selected.

    The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the tf.gather operation. For example:

        selected_indices = tf.image.non_max_suppression(
            boxes, scores, max_output_size, iou_threshold)
        selected_boxes = tf.gather(boxes, selected_indices)

    identityReader Source

    Arguments

    :: Tensor Value ByteString

    reader_handle: The handle to reference the Reader.

    A Reader that outputs the queued work as both the key and value.

    To use, enqueue strings in a Queue. ReaderRead will take the front work string and output (work, work).

    extractGlimpse Source

    Arguments

    :: Tensor v1 Float

    input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.

    -> Tensor v2 Int32

    size: A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, followed by the glimpse width.

    -> Tensor v3 Float

    offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing the x, y locations of the center of each window.

    -> Tensor Value Float

    glimpse: A tensor representing the glimpses `[batch_size, glimpse_height, glimpse_width, channels]`.

    Extracts a glimpse from the input tensor.

    Returns a set of windows called glimpses extracted at location offsets from the input tensor. If the windows only partially overlap the inputs, the non-overlapping areas will be filled with random noise.

    The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the size parameter.

    The arguments centered and normalized control how the windows are built:

    • If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension.
    • If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0).
    • If the coordinates are not normalized they are interpreted as numbers of pixels.

    conv3DBackpropInput Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    input: Shape `[batch, depth, rows, cols, in_channels]`.

    -> Tensor v2 t

    filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

    -> Tensor v3 t

    out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

    -> Tensor Value t

    output

    Computes the gradients of 3-D convolution with respect to the input.

    matrixSolveLs Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    matrix: Shape is `[..., M, N]`.

    -> Tensor v2 t

    rhs: Shape is `[..., M, K]`.

    -> Tensor v3 Double

    l2_regularizer: Scalar tensor.

    -> Tensor Value t

    output: Shape is `[..., N, K]`.

    Solves one or more linear least-squares problems.

    matrix is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions form matrices of size `[M, N]`. Rhs is a tensor of shape `[..., M, K]`. The output is a tensor of shape `[..., N, K]` where each output matrix solves each of the equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares sense.

    Below we use the following notation for each matrix and right-hand side in the batch: matrix=\(A \in \Re^{m \times n}\), rhs=\(B \in \Re^{m \times k}\), output=\(X \in \Re^{n \times k}\), l2_regularizer=\(\lambda\).

    If fast is True, then the solution is computed by solving the normal equations using Cholesky decomposition. Specifically, if \(m \ge n\) then \(X = (A^T A + \lambda I)^{-1} A^T B\), which solves the least-squares problem \(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 + \lambda ||Z||_F^2\). If \(m \lt n\) then output is computed as \(X = A^T (A A^T + \lambda I)^{-1} B\), which (for \(\lambda = 0\)) is the minimum-norm solution to the under-determined linear system, i.e. \(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2\), subject to \(A Z = B\). Notice that the fast path is only numerically stable when \(A\) is numerically full rank and has a condition number \(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\) or \(\lambda\) is sufficiently large.

    If fast is False an algorithm based on the numerically robust complete orthogonal decomposition is used. This computes the minimum-norm least-squares solution, even when \(A\) is rank deficient. This path is typically 6-7 times slower than the fast path. If fast is False then l2_regularizer is ignored.

    rGBToHSV Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.

    -> Tensor Value t

    output: images converted to HSV.

    Converts one or more images from RGB to HSV.

    Outputs a tensor of the same shape as the images tensor, containing the HSV value of the pixels. The output is only well defined if the values in images are in `[0,1]`.

    `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 corresponds to pure red, a hue of 1/3 is pure green, and 2/3 is pure blue.

    decodeGif Source

    Arguments

    :: Tensor v1 ByteString

    contents: 0-D. The GIF-encoded image.

    -> Tensor Value Word8

    image: 4-D with shape `[num_frames, height, width, 3]`. RGB order

    Decode the first frame of a GIF-encoded image to a uint8 tensor.

    GIFs with frame or transparency compression are not supported; convert animated GIFs from compressed to uncompressed by:

        convert $src.gif -coalesce $dst.gif

    adjustContrast Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) 
    => Tensor v1 t

    images

    -> Tensor v2 Float

    contrast_factor

    -> Tensor v3 Float

    min_value

    -> Tensor v4 Float

    max_value

    -> Tensor Value Float

    output

    Deprecated. Disallowed in GraphDef version >= 2.

    depthToSpace Source

    Arguments

    :: TensorType t 
    => Int64

    block_size: The size of the spatial block, same as in Space2Depth.

    -> Tensor v1 t

    input

    -> Tensor Value t

    output

    DepthToSpace for tensors of type T.

    Rearranges data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. The attr block_size indicates the input block size and how the data is moved.

    • Chunks of data of size `block_size * block_size` from depth are rearranged into non-overlapping blocks of size `block_size x block_size`
    • The width of the output tensor is `input_width * block_size`, whereas the height is `input_height * block_size`.
    • The depth of the input tensor must be divisible by `block_size * block_size`.

    That is, assuming the input is in the shape `[batch, height, width, depth]`, the shape of the output will be `[batch, height*block_size, width*block_size, depth/(block_size*block_size)]`

    This operation requires that the input tensor be of rank 4, and that block_size be >= 1 and that `block_size * block_size` be a divisor of the input depth.

    This operation is useful for resizing the activations between convolutions + (but keeping all data), e.g. instead of pooling. It is also useful for training + purely convolutional models.

    For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:

    ```prettyprint
    x = [[[[1, 2, 3, 4]]]]
    ```

    This operation will output a tensor of shape `[1, 2, 2, 1]`:

    ```prettyprint
    [[[[1], [2]],
      [[3], [4]]]]
    ```

    Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, the corresponding output will have 2x2 elements and will have a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output element shape is `[2, 2, 1]`.

    For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.

    ```prettyprint
    x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
    ```

    This operation, for block size of 2, will return the following tensor of shape `[1, 2, 2, 3]`

    ```prettyprint
    [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
    ```

    Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:

    ```prettyprint
    x = [[[[1, 2, 3, 4],
           [5, 6, 7, 8]],
          [[9, 10, 11, 12],
           [13, 14, 15, 16]]]]
    ```

    the operator will return the following tensor of shape `[1 4 4 1]`:

    ```prettyprint
    x = [[ [1], [2], [5], [6]],
         [ [3], [4], [7], [8]],
         [ [9], [10], [13], [14]],
         [ [11], [12], [15], [16]]]
    ```

    batchMatrixSolve Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    matrix

    -> Tensor v2 t

    rhs

    -> Tensor Value t

    output

    erfc Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes the complementary error function of x element-wise.

    resizeBilinearGrad Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 Float

    grads: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor v2 t

    original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`. The image tensor that was resized.

    -> Tensor Value t

    output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. Input image must have been float or double.

    Computes the gradient of bilinear interpolation.

    fact Source

    Arguments

    :: Tensor Value ByteString

    fact

    Output a fact about factorials.

    deleteSessionTensor Source

    Arguments

    :: Tensor v1 ByteString

    handle: The handle for a tensor stored in the session state.

    -> ControlNode 

    Delete the tensor specified by its handle in the session.

    logicalOr Source

    Arguments

    :: Tensor v1 Bool

    x

    -> Tensor v2 Bool

    y

    -> Tensor Value Bool

    z

    Returns the truth value of x OR y element-wise.

    *NOTE*: LogicalOr supports broadcasting. More about broadcasting here.

    getSessionTensor Source

    Arguments

    :: TensorType dtype 
    => Tensor v1 ByteString

    handle: The handle for a tensor stored in the session state.

    -> Tensor Value dtype

    value: The tensor for the given handle.

    Get the value of the tensor specified by its handle.

    batchMatrixInverse Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input

    -> Tensor Value t

    output

    shardedFilespec Source

    Arguments

    :: Tensor v1 ByteString

    basename

    -> Tensor v2 Int32

    num_shards

    -> Tensor Value ByteString

    filename

    Generate a glob pattern matching all sharded file names.

    decodeBase64 Source

    Arguments

    :: Tensor v1 ByteString

    input: Base64 strings to decode.

    -> Tensor Value ByteString

    output: Decoded strings.

    Decode web-safe base64-encoded strings.

    Input may or may not have padding at the end. See EncodeBase64 for padding. Web-safe means that input must use - and _ instead of + and /.

    getSessionHandle Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    value: The tensor to be stored.

    -> Tensor Value ByteString

    handle: The handle for the tensor stored in the session state.

    Store the input tensor in the state of the current session.

    initializeTable Source

    Arguments

    :: (TensorType tkey, TensorType tval) 
    => Tensor v1 ByteString

    table_handle: Handle to a table which will be initialized.

    -> Tensor v2 tkey

    keys: Keys of type Tkey.

    -> Tensor v3 tval

    values: Values of type Tval.

    -> ControlNode 

    Table initializer that takes two tensors for keys and values respectively.

    tan Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes tan of x element-wise.

    tanh Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes hyperbolic tangent of x element-wise.

    applyAdagradDA Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    gradient_accumulator: Should be from a Variable().

    -> Tensor v3 t

    gradient_squared_accumulator: Should be from a Variable().

    -> Tensor v4 t

    grad: The gradient.

    -> Tensor v5 t

    lr: Scaling factor. Must be a scalar.

    -> Tensor v6 t

    l1: L1 regularization. Must be a scalar.

    -> Tensor v7 t

    l2: L2 regularization. Must be a scalar.

    -> Tensor v8 Int64

    global_step: Training step number. Must be a scalar.

    -> Tensor Value t

    out: Same as "var".

    Update '*var' according to the proximal adagrad scheme.

    stringToHashBucket Source

    Arguments

    :: Int64

    num_buckets: The number of buckets.

    -> Tensor v1 ByteString

    string_tensor

    -> Tensor Value Int64

    output: A Tensor of the same shape as the input string_tensor.

    Converts each string in the input Tensor to its hash mod by a number of buckets.

    The hash function is deterministic on the content of the string within the + process.

    Note that the hash function may change from time to time. This functionality will be deprecated and it's recommended to use `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.

    eluGrad Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    gradients: The backpropagated gradients to the corresponding Elu operation.

    -> Tensor v2 t

    outputs: The outputs of the corresponding Elu operation.

    -> Tensor Value t

    backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, gradients otherwise.

    Computes gradients for the exponential linear (Elu) operation.
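    The documented backprop rule, transcribed element-wise as a plain-Haskell sketch:

    ```haskell
    eluGrad :: Double -> Double -> Double
    eluGrad gradient output
      | output < 0 = gradient * (output + 1)
      | otherwise  = gradient
    ```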

    fractionalAvgPoolGrad Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
    => Tensor v1 Int64

    orig_input_tensor_shape: Original input tensor shape for fractional_avg_pool

    -> Tensor v2 t

    out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of fractional_avg_pool.

    -> Tensor v3 Int64

    row_pooling_sequence: row pooling sequence, form pooling region with col_pooling_sequence.

    -> Tensor v4 Int64

    col_pooling_sequence: column pooling sequence, form pooling region with row_pooling_sequence.

    -> Tensor Value t

    output: 4-D. Gradients w.r.t. the input of fractional_avg_pool.

    Computes gradient of the FractionalAvgPool function.

    Unlike FractionalMaxPoolGrad, we don't need to find arg_max for FractionalAvgPoolGrad, we just need to evenly back-propagate each element of out_backprop to those indices that form the same pooling cell. Therefore, we just need to know the shape of the original input tensor, instead of the whole tensor.

    matrixTriangularSolve Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    matrix: Shape is `[..., M, M]`.

    -> Tensor v2 t

    rhs: Shape is `[..., M, K]`.

    -> Tensor Value t

    output: Shape is `[..., M, K]`.

    Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.

    matrix is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. If lower is True then the strictly upper triangular part of each inner-most matrix is assumed to be zero and not accessed. If lower is False then the strictly lower triangular part of each inner-most matrix is assumed to be zero and not accessed. rhs is a tensor of shape `[..., M, K]`.

    The output is a tensor of shape `[..., M, K]`. If adjoint is False then the innermost matrices in output satisfy matrix equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If adjoint is True then the innermost matrices in output satisfy matrix equations `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
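    For intuition, the lower-triangular, non-adjoint case is just forward substitution; a single-matrix sketch (forwardSub is our own helper, assuming a nonsingular diagonal):

    ```haskell
    forwardSub :: [[Double]] -> [Double] -> [Double]
    forwardSub a b = foldl step [] (zip a b)
      where
        step xs (row, bi) =
          let s = sum (zipWith (*) row xs)   -- contribution of solved entries
              d = row !! length xs           -- diagonal entry
          in xs ++ [(bi - s) / d]

    -- forwardSub [[2, 0], [1, 3]] [4, 5] ==> [2.0, 1.0]
    ```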

    editDistance Source

    Arguments

    :: TensorType t 
    => Tensor v1 Int64

    hypothesis_indices: The indices of the hypothesis list SparseTensor. This is an N x R int64 matrix.

    -> Tensor v2 t

    hypothesis_values: The values of the hypothesis list SparseTensor. This is an N-length vector.

    -> Tensor v3 Int64

    hypothesis_shape: The shape of the hypothesis list SparseTensor. This is an R-length vector.

    -> Tensor v4 Int64

    truth_indices: The indices of the truth list SparseTensor. This is an M x R int64 matrix.

    -> Tensor v5 t

    truth_values: The values of the truth list SparseTensor. This is an M-length vector.

    -> Tensor v6 Int64

    truth_shape: truth indices, vector.

    -> Tensor Value Float

    output: A dense float tensor with rank R - 1.

    For the example input:

    // hypothesis represents a 2x1 matrix with variable-length values:
        //   (0,0) = ["a"]
        //   (1,0) = ["b"]
        hypothesis_indices = [[0, 0, 0],
                              [1, 0, 0]]
        hypothesis_values = ["a", "b"]
        hypothesis_shape = [2, 1, 1]

    // truth represents a 2x2 matrix with variable-length values:
        //   (0,0) = []
        //   (0,1) = ["a"]
        //   (1,0) = ["b", "c"]
        //   (1,1) = ["a"]
        truth_indices = [[0, 1, 0],
                         [1, 0, 0],
                         [1, 0, 1],
                         [1, 1, 0]]
        truth_values = ["a", "b", "c", "a"]
        truth_shape = [2, 2, 2]
        normalize = true

    The output will be:

    // output is a 2x2 matrix with edit distances normalized by truth lengths.
        output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
                  [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis

    Computes the (possibly normalized) Levenshtein Edit Distance.

    The inputs are variable-length sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape) and (truth_indices, truth_values, truth_shape), as documented in the argument list above.

    barrierIncompleteSize Source

    Arguments

    :: Tensor v1 ByteString

    handle: The handle to a barrier.

    -> Tensor Value Int32

    size: The number of incomplete elements (i.e. those with some of their value components not set) in the barrier.

    Computes the number of incomplete elements in the given barrier.

    threadUnsafeUnigramCandidateSampler Source

    Arguments

    :: Int64

    num_sampled: Number of candidates to randomly sample per batch.

    -> Int64

    num_true: Number of true labels per context.

    -> Int64

    range_max: The sampler will sample integers from the interval [0, range_max).

    -> Bool

    unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

    -> Tensor v1 Int64

    true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

    -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

    (sampled_candidates, true_expected_count, sampled_expected_count)

    • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
    • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
    • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

    Generates labels for candidate sampling with a learned unigram distribution.

    See explanations of candidate sampling and the data formats at go/candidate-sampling.

    For each batch, this op picks a single set of sampled candidate labels.

    The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

    barrierReadySize Source

    Arguments

    :: Tensor v1 ByteString

    handle: The handle to a barrier.

    -> Tensor Value Int32

    size: The number of complete elements (i.e. those with all of their value components set) in the barrier.

    Computes the number of complete elements in the given barrier.

    barrierClose Source

    Arguments

    :: Tensor v1 ByteString

    handle: The handle to a barrier.

    -> ControlNode 

    Closes the given barrier.

    This operation signals that no more new elements will be inserted in the given barrier. Subsequent InsertMany that try to introduce a new key will fail. Subsequent InsertMany operations that just add missing components to already existing elements will continue to succeed. Subsequent TakeMany operations will continue to succeed if sufficient completed elements remain in the barrier. Subsequent TakeMany operations that would block will fail immediately.

    textLineReader Source

    Arguments

    :: Tensor Value ByteString

    reader_handle: The handle to reference the Reader.

    A Reader that outputs the lines of a file delimited by '\n'.

    fFT3D Source

    Arguments

    :: Tensor v1 (Complex Float)

    input: A complex64 tensor.

    -> Tensor Value (Complex Float)

    output: A complex64 tensor of the same shape as input. The inner-most 3 dimensions of input are replaced with their 3D Fourier Transform.

    Compute the 3-dimensional discrete Fourier Transform over the inner-most 3 dimensions of input.

    refExit Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    data: The tensor to be made available to the parent frame.

    -> Tensor Value t

    output: The same tensor as `data`.

    Exits the current frame to its parent frame.

    Exit makes its input `data` available to the parent frame.

    exp Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes exponential of x element-wise. \(y = e^x\).

    restoreSlice Source

    Arguments

    :: TensorType dt 
    => Tensor v1 ByteString

    file_pattern: Must have a single element. The pattern of the files from which we read the tensor.

    -> Tensor v2 ByteString

    tensor_name: Must have a single element. The name of the tensor to be restored.

    -> Tensor v3 ByteString

    shape_and_slice: Scalar. The shapes and slice specifications to use when restoring a tensor.

    -> Tensor Value dt

    tensor: The restored tensor.

    Restores a tensor from checkpoint files.

    This is like Restore except that the restored tensor can be listed as filling only a slice of a larger tensor. shape_and_slice specifies the shape of the larger tensor and the slice that the restored tensor covers.

    The shape_and_slice input has the same format as the elements of the shapes_and_slices input of the SaveSlices op.

    conj Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float]` t) 
    => Tensor v1 t

    input

    -> Tensor Value t

    output

    Returns the complex conjugate of a complex number.

    Given a tensor input of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in input. The complex numbers in input must be of the form \(a + bj\), where *a* is the real part and *b* is the imaginary part.

    The complex conjugate returned by this operation is of the form \(a - bj\).

    For example:

    ```
    # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j]
    tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
    ```
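    Data.Complex provides the same element-wise operation host-side, which makes the example above easy to check:

    ```haskell
    import Data.Complex (Complex ((:+)), conjugate)

    main :: IO ()
    main = print (map conjugate [(-2.25) :+ 4.75, 3.25 :+ 5.75])
    -- [(-2.25) :+ (-4.75), 3.25 :+ (-5.75)]
    ```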

    resizeNearestNeighborGrad Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    grads: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor v2 Int32

    size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The original input size.

    -> Tensor Value t

    output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image.

    Computes the gradient of nearest neighbor interpolation.

    tensorArrayClose Source

    Arguments

    :: Tensor v1 ByteString

    handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

    -> ControlNode 

    Delete the TensorArray from its resource container. This enables the user to close and release the resource in the middle of a step/run.

    atan Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes atan of x element-wise.

    tensorArraySize Source

    Arguments

    :: Tensor v1 ByteString

    handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

    -> Tensor v2 Float

    flow_in: A float scalar that enforces proper chaining of operations.

    -> Tensor Value Int32

    size: The current size of the TensorArray.

    Get the current size of the TensorArray.

    tensorArrayConcat Source

    Arguments

    :: TensorType dtype 
    => Tensor v1 ByteString

    handle: The handle to a TensorArray.

    -> Tensor v2 Float

    flow_in: A float scalar that enforces proper chaining of operations.

    -> (Tensor Value dtype, Tensor Value Int64)

    (value, lengths)

    • value: All of the elements in the TensorArray, concatenated along the first axis.
    • lengths: A vector of the row sizes of the original T elements in the value output. In the example above, this would be the values: `(n1, n2, ..., n(T-1))`.

    Concat the elements from the TensorArray into value `value`.

    Takes T elements of shapes

    ```
    (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
    ```

    and concatenates them into a Tensor of shape:

    ```
    (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)
    ```

    All elements must have the same shape (excepting the first dimension).

    lRN Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Float]` t) 
    => Tensor v1 t

    input: 4-D.

    -> Tensor Value t

    output

    Local Response Normalization.

    The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently. Within a given vector, each component is divided by the weighted, squared sum of inputs within depth_radius. In detail,

        sqr_sum[a, b, c, d] =
            sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
        output = input / (bias + alpha * sqr_sum) ** beta

    For details, see Krizhevsky et al., ImageNet classification with deep convolutional neural networks (NIPS 2012).

    stringToHashBucketFast Source

    Arguments

    :: Int64

    num_buckets: The number of buckets.

    -> Tensor v1 ByteString

    input: The strings to assign a hash bucket.

    -> Tensor Value Int64

    output: A Tensor of the same shape as the input string_tensor.

    Converts each string in the input Tensor to its hash mod by a number of buckets.

    The hash function is deterministic on the content of the string within the process and will never change. However, it is not suitable for cryptography. This function may be used when CPU time is scarce and inputs are trusted or unimportant. There is a risk of adversaries constructing inputs that all hash to the same bucket. To prevent this problem, use a strong hash function with `tf.string_to_hash_bucket_strong`.

    tensorArrayPack Source

    Arguments

    :: TensorType dtype 
    => Tensor v1 ByteString

    handle: The handle to a TensorArray.

    -> Tensor v2 Float

    flow_in: A float scalar that enforces proper chaining of operations.

    -> Tensor Value dtype

    value: All of the elements in the TensorArray, concatenated along a new axis (the new dimension 0).

    Pack the elements from the TensorArray into output value.

    **WARNING: This op is deprecated.**

    Instead of this op, use TensorArrayGather with `indices = RangeOp(0, TensorArraySizeOp)`.

    All elements must have the same shape.

    concatOffset Source

    Arguments

    :: Tensor v1 Int32

    concat_dim: The dimension along which to concatenate.

    -> [Tensor v2 Int32]

    shape: The N int32 vectors representing shape of tensors being concatenated.

    -> [Tensor Value Int32]

    offset: The N int32 vectors representing the starting offset of input tensors within the concatenated output.

    This is typically used by gradient computations for a concat operation.

    Computes offsets of concat inputs within its output.

    For example:

    ```prettyprint
    # x is [2, 2, 7]
    # y is [2, 3, 7]
    # z is [2, 5, 7]
    concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
    ```
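    A list-based sketch of the offset computation (concatOffset here is our own helper; note we pass concatenation axis 1 for the shapes above):

    ```haskell
    -- Running offsets along the concat axis, zeros elsewhere.
    concatOffset :: Int -> [[Int]] -> [[Int]]
    concatOffset dim shapes = go 0 shapes
      where
        rank = length (head shapes)          -- assumes at least one shape
        go _ [] = []
        go acc (s : rest) =
          [ if d == dim then acc else 0 | d <- [0 .. rank - 1] ]
            : go (acc + s !! dim) rest

    -- concatOffset 1 [[2,2,7], [2,3,7], [2,5,7]]
    --   ==> [[0,0,0], [0,2,0], [0,5,0]]
    ```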

    refEnter Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    data: The tensor to be made available to the child frame.

    -> Tensor Value t

    output: The same tensor as `data`.

    Creates or finds a child frame, and makes `data` available to the child frame.

    The unique frame_name is used by the Executor to identify frames. If is_constant is true, output is a constant in the child frame; otherwise it may be changed in the child frame. At most parallel_iterations iterations are run in parallel in the child frame.

    softsign Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    features

    -> Tensor Value t

    activations

    Computes softsign: `features / (abs(features) + 1)`.

    tensorArrayWrite Source

    Arguments

    :: TensorType t 
    => Tensor v1 ByteString

    handle: The handle to a TensorArray.

    -> Tensor v2 Int32

    index: The position to write to inside the TensorArray.

    -> Tensor v3 t

    value: The tensor to write to the TensorArray.

    -> Tensor v4 Float

    flow_in: A float scalar that enforces proper chaining of operations.

    -> Tensor Value Float

    flow_out: A float scalar that enforces proper chaining of operations.

    Push an element onto the tensor_array.

    diag Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t) 
    => Tensor v1 t

    diagonal: Rank k tensor where k is at most 3.

    -> Tensor Value t

    output

    Returns a diagonal tensor with given diagonal values.

    Given a diagonal, this operation returns a tensor with the diagonal and everything else padded with zeros. The diagonal is computed as follows:

    Assume diagonal has dimensions [D1,..., Dk], then the output is a tensor of rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:

    `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.

    For example:

    ```prettyprint
    # diagonal is [1, 2, 3, 4]
    tf.diag(diagonal) ==> [[1, 0, 0, 0]
                           [0, 2, 0, 0]
                           [0, 0, 3, 0]
                           [0, 0, 0, 4]]
    ```

    matrixDiagPart Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input: Rank k tensor where `k >= 2` and the last two dimensions are equal.

    -> Tensor Value t

    diagonal: The extracted diagonal(s) having shape `diagonal.shape = input.shape[:-1]`.

    Returns the batched diagonal part of a batched tensor.

    This operation returns a tensor with the diagonal part of the batched input. The diagonal part is computed as follows:

    Assume input has k dimensions `[I, J, K, ..., N, N]`, then the output is a tensor of rank `k - 1` with dimensions `[I, J, K, ..., N]` where:

    `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.

    The input must be at least a matrix.

    For example:

    ```prettyprint
    # input is [[[1, 0, 0, 0]
                 [0, 2, 0, 0]
                 [0, 0, 3, 0]
                 [0, 0, 0, 4]],
                [[5, 0, 0, 0]
                 [0, 6, 0, 0]
                 [0, 0, 7, 0]
                 [0, 0, 0, 8]]]

    and input.shape = (2, 4, 4)

    tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]

    which has shape (2, 4)
    ```

    queueSize Source

    Arguments

    :: Tensor v1 ByteString

    handle: The handle to a queue.

    -> Tensor Value Int32

    size: The number of elements in the given queue.

    Computes the number of elements in the given queue.

    decodePng Source

    Arguments

    :: (TensorType dtype, OneOf `[Word16, Word8]` dtype) 
    => Tensor v1 ByteString

    contents: 0-D. The PNG-encoded image.

    -> Tensor Value dtype

    image: 3-D with shape `[height, width, channels]`.

    Decode a PNG-encoded image to a uint8 or uint16 tensor.

    The attr channels indicates the desired number of color channels for the decoded image.

    Accepted values are:

    • 0: Use the number of channels in the PNG-encoded image.
    • 1: output a grayscale image.
    • 3: output an RGB image.
    • 4: output an RGBA image.

    If needed, the PNG-encoded image is transformed to match the requested number of color channels.

    ceil Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Returns element-wise smallest integer not less than x.

    priorityQueue Source

    Arguments

    :: Tensor Value ByteString

    handle: The handle to the queue.

    A queue that produces elements sorted by the first component value.

    Note that the PriorityQueue requires the first component of any element to be a scalar int64, in addition to the other elements declared by component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will all require (resp. output) one extra entry in their input (resp. output) lists.

    placeholderWithDefault Source

    Arguments

    :: TensorType dtype 
    => Tensor v1 dtype

    input: The default value to produce when output is not fed.

    -> Tensor Value dtype

    output: A placeholder tensor that defaults to input if it is not fed.

    A placeholder op that passes through input when its output is not fed.

    cropAndResizeGradImage Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 Float

    grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

    -> Tensor v2 Float

    boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

    -> Tensor v3 Int32

    box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

    -> Tensor v4 Int32

    image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]` containing the original image size. Both image_height and image_width need to be positive.

    -> Tensor Value t

    output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.

    Computes the gradient of the crop_and_resize op wrt the input image tensor.

    readerReset Source

    Arguments

    :: Tensor v1 ByteString

    reader_handle: Handle to a Reader.

    -> ControlNode 

    Restore a Reader to its initial clean state.

    extractImagePatches Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.

    -> Tensor Value t

    patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * ksize_cols * depth]` containing image patches with size `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension.

    Extract patches from images and put them in the "depth" output dimension.

    batchMatrixSetDiag Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input

    -> Tensor v2 t

    diagonal

    -> Tensor Value t

    output

    stackClose Source

    Arguments

    :: Tensor v1 ByteString

    handle: The handle to a stack.

    -> ControlNode 

    Delete the stack from its resource container.

    quantizeAndDequantize Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input: Tensor to quantize and then dequantize.

    -> Tensor Value t

    output

    Quantizes then dequantizes a tensor.

    This op simulates the precision loss from the quantized forward pass by:

    1. Quantizing the tensor to fixed point numbers, which should match the target quantization method when it is used in inference.
    2. Dequantizing it back to floating point numbers for the following ops, most likely matmul.

    There are different ways to quantize. This version does not use the full range of the output type, choosing to elide the lowest possible value for symmetry (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to 0.

    To perform this op, we first find the range of values in our tensor. The range we use is always centered on 0, so we find m such that

    1. m = max(abs(input_min), abs(input_max)) if range_given is true,
    2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.

    Our input tensor range is then [-m, m].

    Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed]. If signed_input is true, this is

    `[min_fixed, max_fixed] = [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]`.

    Otherwise, if signed_input is false, the fixed-point range is

    `[min_fixed, max_fixed] = [0, (1 << num_bits) - 1]`.

    From this we compute our scaling factor, s:

    s = (max_fixed - min_fixed) / (2 * m).

    Now we can quantize and dequantize the elements of our tensor. An element e is transformed into e':

    e' = (e * s).round_to_nearest() / s.

    Note that we have a different number of buckets in the signed vs. unsigned cases. For example, if num_bits == 8, we get 254 buckets in the signed case vs. 255 in the unsigned case.

    For example, suppose num_bits = 8 and m = 1. Then

    `[min_fixed, max_fixed] = [-127, 127]`, and `s = (127 + 127) / 2 = 127`.

    Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
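    The arithmetic above is easy to check outside the graph. The following is a minimal pure-Haskell sketch of the described transform, not the generated op itself; `quantDequant` and `roundHalfUp` are illustrative names, and `roundHalfUp` stands in for the op's round-to-nearest:

    ```haskell
    -- Pure sketch of the quantize-then-dequantize math described above.
    quantDequant :: Bool -> Int -> [Double] -> [Double]
    quantDequant signedInput numBits xs = map (\e -> roundHalfUp (e * s) / s) xs
      where
        m = maximum (map abs xs)  -- the range_given = false case, centered on 0
        (minFixed, maxFixed)
          | signedInput = (negate (2 ^ (numBits - 1) - 1), 2 ^ (numBits - 1) - 1)
          | otherwise   = (0, 2 ^ numBits - 1)
        s = fromIntegral (maxFixed - minFixed) / (2 * m)
        roundHalfUp v = fromIntegral (floor (v + 0.5) :: Integer)
    ```

    With this, `quantDequant True 8 [-1, -0.5, 0, 0.3]` reproduces the worked example above.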

    isNan Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value Bool

    y

    Returns which elements of x are NaN.

    where' Source

    Arguments

    :: Tensor v1 Bool

    input

    -> Tensor Value Int64

    index

    Returns locations of true values in a boolean tensor.

    This operation returns the coordinates of true elements in input. The coordinates are returned in a 2-D tensor where the first dimension (rows) represents the number of true elements, and the second dimension (columns) represents the coordinates of the true elements. Keep in mind, the shape of the output tensor can vary depending on how many true values there are in input. Indices are output in row-major order.

    For example:

    ```prettyprint
    # input tensor is [[True, False]
    #                  [True, False]]
    # input has two true values, so output has two coordinates.
    # input has rank of 2, so coordinates have two indices.
    where(input) ==> [[0, 0],
                      [1, 0]]

    # input tensor is [[[True, False]
    #                   [True, False]]
    #                  [[False, True]
    #                   [False, True]]
    #                  [[False, False]
    #                   [False, True]]]
    # input has 5 true values, so output has 5 coordinates.
    # input has rank of 3, so coordinates have three indices.
    where(input) ==> [[0, 0, 0],
                      [0, 1, 0],
                      [1, 0, 1],
                      [1, 1, 1],
                      [2, 1, 1]]
    ```
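    For the rank-2 case, the row-major coordinate rule can be sketched in pure Haskell; `whereTrue2D` is an illustrative helper, not part of the bindings:

    ```haskell
    -- Row-major coordinates of True values in a rank-2 boolean tensor.
    whereTrue2D :: [[Bool]] -> [[Int]]
    whereTrue2D rows =
      [ [r, c] | (r, row) <- zip [0 ..] rows
               , (c, v)   <- zip [0 ..] row
               , v ]
    ```

    `whereTrue2D [[True, False], [True, False]]` yields `[[0, 0], [1, 0]]`, matching the first example.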

    listDiff Source

    Arguments

    :: (TensorType t, TensorType out_idx, OneOf `[Int32, Int64]` out_idx) 
    => Tensor v1 t

    x: 1-D. Values to keep.

    -> Tensor v2 t

    y: 1-D. Values to remove.

    -> (Tensor Value t, Tensor Value out_idx)

    (out, idx)

    • out: 1-D. Values present in x but not in y.
    • idx: 1-D. Positions of x values preserved in out.

    Computes the difference between two lists of numbers or strings.

    Given a list x and a list y, this operation returns a list out that represents all values that are in x but not in y. The returned list out is sorted in the same order that the numbers appear in x (duplicates are preserved). This operation also returns a list idx that represents the position of each out element in x. In other words:

    `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`

    For example, given this input:

    ```prettyprint
    x = [1, 2, 3, 4, 5, 6]
    y = [1, 3, 5]
    ```

    This operation would return:

    ```prettyprint
    out ==> [2, 4, 6]
    idx ==> [1, 3, 5]
    ```
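    The out/idx contract is small enough to state as a pure-Haskell sketch (illustrative only, not the generated binding):

    ```haskell
    -- Keep x's order and duplicates; idx gives each kept element's position in x.
    listDiff :: Eq a => [a] -> [a] -> ([a], [Int])
    listDiff xs ys =
      unzip [ (x, i) | (i, x) <- zip [0 ..] xs, x `notElem` ys ]
    ```

    `listDiff [1, 2, 3, 4, 5, 6] [1, 3, 5]` gives `([2, 4, 6], [1, 3, 5])`, as in the example.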

    stridedSlice Source

    Arguments

    :: (TensorType index, OneOf `[Int32, Int64]` index, TensorType t) 
    => Tensor v1 t

    input

    -> Tensor v2 index

    begin: `begin[i]` specifies the offset into the ith dimension of input to slice from.

    -> Tensor v3 index

    end: `end[i]` specifies the first offset into the ith dimension of input that will not be extracted. Out of range values are clamped to `[0, dim[i])` if `slice[i] > 0` or `[-1, dim[i] - 1]` if `slice[i] < 0`.

    -> Tensor v4 index

    strides: `strides[i]` specifies the increment in the ith dimension after extracting a given element. Negative indices will reverse the original order. Out of range values are clamped to `[0, dim[i])` if `slice[i] > 0` or `[-1, dim[i] - 1]` if `slice[i] < 0`.

    -> Tensor Value t

    output

    Return a strided slice from input.

    The output tensor is a tensor with dimensions implied by begin, end, and strides, whose values are extracted from begin.

    Specifically, the result tensor at index `(i[0], i[1], ..., i[n-1])` will obtain the value `input[begin[0] + i[0] * stride[0], ..., begin[n-1] + i[n-1] * stride[n-1]]`.

    *Requirements*: `0 != strides[i] for i in [0, n)`

    randomShuffleQueue Source

    Arguments

    :: Tensor Value ByteString

    handle: The handle to the queue.

    A queue that randomizes the order of elements.

    tileGrad Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input

    -> Tensor v2 Int32

    multiples

    -> Tensor Value t

    output

    Returns the gradient of Tile.

    Since Tile takes an input and repeats the input multiples times along each dimension, TileGrad takes in multiples and aggregates each repeated tile of input into output.

    stridedSliceAssign Source

    Arguments

    :: (TensorType index, OneOf `[Int32, Int64]` index, TensorType t) 
    => Tensor v1 t

    ref

    -> Tensor v2 index

    begin

    -> Tensor v3 index

    end

    -> Tensor v4 index

    strides

    -> Tensor v5 t

    value

    -> Tensor Value t

    output_ref

    Assign value to the sliced l-value reference of ref.

    The values of value are assigned to the positions in the variable ref that are selected by the slice parameters. The slice parameters begin, end, strides, etc. work exactly as in StridedSlice.

    NOTE this op currently does not support broadcasting and so value's shape must be exactly the shape produced by the slice of ref.

    reshape Source

    Arguments

    :: (TensorType t, TensorType tshape, OneOf `[Int32, Int64]` tshape) 
    => Tensor v1 t

    tensor

    -> Tensor v2 tshape

    shape: Defines the shape of the output tensor.

    -> Tensor Value t

    output

    Reshapes a tensor.

    Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.

    If one component of shape is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a shape of `[-1]` flattens into 1-D. At most one component of shape can be -1.

    If shape is 1-D or higher, then the operation returns a tensor with shape shape filled with the values of tensor. In this case, the number of elements implied by shape must be the same as the number of elements in tensor.

    For example:

    ```prettyprint
    # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9]
    # tensor t has shape [9]
    reshape(t, [3, 3]) ==> [[1, 2, 3],
                            [4, 5, 6],
                            [7, 8, 9]]

    # tensor t is [[[1, 1], [2, 2]],
    #              [[3, 3], [4, 4]]]
    # tensor t has shape [2, 2, 2]
    reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                            [3, 3, 4, 4]]

    # tensor t is [[[1, 1, 1],
    #               [2, 2, 2]],
    #              [[3, 3, 3],
    #               [4, 4, 4]],
    #              [[5, 5, 5],
    #               [6, 6, 6]]]
    # tensor t has shape [3, 2, 3]
    # pass '[-1]' to flatten t
    reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]

    # -1 can also be used to infer the shape

    # -1 is inferred to be 9:
    reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                             [4, 4, 4, 5, 5, 5, 6, 6, 6]]
    # -1 is inferred to be 2:
    reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                             [4, 4, 4, 5, 5, 5, 6, 6, 6]]
    # -1 is inferred to be 3:
    reshape(t, [2, -1, 3]) ==> [[[1, 1, 1],
                                 [2, 2, 2],
                                 [3, 3, 3]],
                                [[4, 4, 4],
                                 [5, 5, 5],
                                 [6, 6, 6]]]

    # tensor t is [7]
    # shape `[]` reshapes to a scalar
    reshape(t, []) ==> 7
    ```
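    The -1 inference shown above is a one-liner to sketch in pure Haskell (illustrative; assumes at most one -1 and an evenly dividing element count):

    ```haskell
    -- Infer a single -1 dimension so the element count is preserved.
    inferShape :: Int -> [Int] -> [Int]
    inferShape numElems shape =
      map (\d -> if d == -1 then numElems `div` known else d) shape
      where known = product (filter (/= -1) shape)
    ```

    For a tensor with 18 elements, `inferShape 18 [2, -1, 3]` gives `[2, 3, 3]`, matching the last inference example.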

    fIFOQueue Source

    Arguments

    :: Tensor Value ByteString

    handle: The handle to the queue.

    A queue that produces elements in first-in first-out order.

    learnedUnigramCandidateSampler Source

    Arguments

    :: Int64

    num_sampled: Number of candidates to randomly sample per batch.

    -> Int64

    num_true: Number of true labels per context.

    -> Int64

    range_max: The sampler will sample integers from the interval [0, range_max).

    -> Bool

    unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

    -> Tensor v1 Int64

    true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

    -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

    (sampled_candidates, true_expected_count, sampled_expected_count)

    • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
    • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
    • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

    Generates labels for candidate sampling with a learned unigram distribution.

    See explanations of candidate sampling and the data formats at go/candidate-sampling.

    For each batch, this op picks a single set of sampled candidate labels.

    The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

    fractionalAvgPool Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
    => Tensor v1 t

    value: 4-D with shape `[batch, height, width, channels]`.

    -> (Tensor Value t, Tensor Value Int64, Tensor Value Int64)

    (output, row_pooling_sequence, col_pooling_sequence)

    • output: output tensor after fractional avg pooling.
    • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
    • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

    Performs fractional average pooling on the input.

    Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region.

    randomCrop Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) 
    => Tensor v1 t

    image: 3-D of shape `[height, width, channels]`.

    -> Tensor v2 Int64

    size: 1-D of length 2 containing: crop_height, crop_width.

    -> Tensor Value t

    output: 3-D of shape `[crop_height, crop_width, channels]`.

    Randomly crop image.

    size is a 1-D int64 tensor with 2 elements representing the crop height and width. The values must be non-negative.

    This Op picks a random location in image and crops a height by width rectangle from that location. The random location is picked so the cropped area will fit inside the original image.

    _HostCast Source

    Arguments

    :: (TensorType dstT, TensorType srcT) 
    => Tensor v1 srcT

    x

    -> Tensor Value dstT

    y

    Cast x of type SrcT to y of DstT.

    _HostCast requires its input and produces its output in host memory.

    queueClose Source

    Arguments

    :: Tensor v1 ByteString

    handle: The handle to a queue.

    -> ControlNode 

    Closes the given queue.

    This operation signals that no more elements will be enqueued in the given queue. Subsequent Enqueue(Many) operations will fail. Subsequent Dequeue(Many) operations will continue to succeed if sufficient elements remain in the queue. Subsequent Dequeue(Many) operations that would block will fail immediately.

    slice Source

    Arguments

    :: (TensorType index, OneOf `[Int32, Int64]` index, TensorType t) 
    => Tensor v1 t

    input

    -> Tensor v2 index

    begin: begin[i] specifies the offset into the ith dimension of input to slice from.

    -> Tensor v3 index

    size: size[i] specifies the number of elements of the ith dimension of input to slice. If size[i] is -1, all remaining elements in dimension i are included in the slice (i.e. this is equivalent to setting size[i] = input.dim_size(i) - begin[i]).

    -> Tensor Value t

    output

    Return a slice from input.

    The output tensor is a tensor with dimensions described by size whose values are extracted from input starting at the offsets in begin (sketched below).

    *Requirements*: `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)`
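    A 1-D pure-Haskell sketch of this contract (illustrative only), including the size = -1 convention:

    ```haskell
    -- size = -1 takes all remaining elements in the dimension.
    slice1D :: Int -> Int -> [a] -> [a]
    slice1D begin size xs = take n (drop begin xs)
      where n = if size == -1 then length xs - begin else size
    ```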

    stridedSliceGrad Source

    Arguments

    :: (TensorType index, OneOf `[Int32, Int64]` index, TensorType t) 
    => Tensor v1 index

    shape

    -> Tensor v2 index

    begin

    -> Tensor v3 index

    end

    -> Tensor v4 index

    strides

    -> Tensor v5 t

    dy

    -> Tensor Value t

    output

    Returns the gradient of StridedSlice.

    Since StridedSlice cuts out pieces of its input which is size shape, its gradient will have the same shape (which is passed here as shape). The gradient will be zero in any element that the slice does not select.

    Arguments are the same as StridedSliceGrad with the exception that dy is the input gradient to be propagated and shape is the shape of StridedSlice's input.

    sparseTensorDenseAdd Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 tindices

    a_indices: 2-D. The indices of the SparseTensor, with shape `[nnz, ndims]`.

    -> Tensor v2 t

    a_values: 1-D. The values of the SparseTensor, with shape `[nnz]`.

    -> Tensor v3 tindices

    a_shape: 1-D. The shape of the SparseTensor, with shape `[ndims]`.

    -> Tensor v4 t

    b: ndims-D Tensor. With shape a_shape.

    -> Tensor Value t

    output

    Adds up a SparseTensor and a dense Tensor, producing a dense Tensor.

    This Op does not require a_indices be sorted in standard lexicographic order.

    size Source

    Arguments

    :: (TensorType t, TensorType out_type, OneOf `[Int32, Int64]` out_type) 
    => Tensor v1 t

    input

    -> Tensor Value out_type

    output

    Returns the size of a tensor.

    This operation returns an integer representing the number of elements in input.

    For example:

    ```prettyprint
    # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    size(t) ==> 12
    ```

    barrier Source

    Arguments

    :: Tensor Value ByteString

    handle: The handle to the barrier.

    Defines a barrier that persists across different graph executions.

    A barrier represents a key-value map, where each key is a string, and each value is a tuple of tensors.

    At runtime, the barrier contains complete and incomplete elements. A complete element has defined tensors for all components of its value tuple, and may be accessed using BarrierTakeMany. An incomplete element has some undefined components in its value tuple, and may be updated using BarrierInsertMany.

    lgamma Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes the log of the absolute value of `Gamma(x)` element-wise.

    decodeJpeg Source

    Arguments

    :: Tensor v1 ByteString

    contents: 0-D. The JPEG-encoded image.

    -> Tensor Value Word8

    image: 3-D with shape `[height, width, channels]`.

    Decode a JPEG-encoded image to a uint8 tensor.

    The attr channels indicates the desired number of color channels for the decoded image.

    Accepted values are:

    • 0: Use the number of channels in the JPEG-encoded image.
    • 1: output a grayscale image.
    • 3: output an RGB image.

    If needed, the JPEG-encoded image is transformed to match the requested number of color channels.

    The attr ratio allows downscaling the image by an integer factor during decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than downscaling the image later.

    shapeN Source

    Arguments

    :: (TensorType t, TensorType out_type, OneOf `[Int32, Int64]` out_type) 
    => [Tensor v1 t]

    input

    -> [Tensor Value out_type]

    output

    Returns shape of tensors.

    This operation returns N 1-D integer tensors representing the shape of `input[i]`.

    uniformCandidateSampler Source

    Arguments

    :: Int64

    num_sampled: Number of candidates to randomly sample per batch.

    -> Int64

    num_true: Number of true labels per context.

    -> Int64

    range_max: The sampler will sample integers from the interval [0, range_max).

    -> Bool

    unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

    -> Tensor v1 Int64

    true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

    -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

    (sampled_candidates, true_expected_count, sampled_expected_count)

    • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
    • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
    • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

    Generates labels for candidate sampling with a uniform distribution.

    See explanations of candidate sampling and the data formats at go/candidate-sampling.

    For each batch, this op picks a single set of sampled candidate labels.

    The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

    unique Source

    Arguments

    :: (TensorType t, TensorType out_idx, OneOf `[Int32, Int64]` out_idx) 
    => Tensor v1 t

    x: 1-D.

    -> (Tensor Value t, Tensor Value out_idx)

    (y, idx)

    • y: 1-D.
    • idx: 1-D.

    Finds unique elements in a 1-D tensor.

    This operation returns a tensor y containing all of the unique elements of x sorted in the same order that they occur in x. This operation also returns a tensor idx the same size as x that contains the index of each value of x in the unique output y. In other words:

    `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

    For example:

    ```prettyprint
    # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8]
    y, idx = unique(x)
    y ==> [1, 2, 4, 7, 8]
    idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
    ```
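    A pure-Haskell sketch of the same y/idx contract (illustrative only; quadratic, unlike the op):

    ```haskell
    import Data.List  (elemIndex, nub)
    import Data.Maybe (fromJust)

    -- First-occurrence order, plus each element's index into the unique list.
    unique :: Eq a => [a] -> ([a], [Int])
    unique xs = (ys, map (\x -> fromJust (elemIndex x ys)) xs)
      where ys = nub xs
    ```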

    drawBoundingBoxes Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Float]` t) 
    => Tensor v1 t

    images: 4-D with shape `[batch, height, width, depth]`. A batch of images.

    -> Tensor v2 Float

    boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes.

    -> Tensor Value t

    output: 4-D with the same shape as images. The batch of input images with bounding boxes drawn on the images.

    Draw bounding boxes on a batch of images.

    Outputs a copy of images but draws on top of the pixels zero or more bounding boxes specified by the locations in boxes. The coordinates of each bounding box in boxes are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.

    For example, if an image is 100 x 200 pixels and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of the bounding box will be `(10, 40)` to `(50, 180)`.

    Parts of the bounding box may fall outside the image.
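    A pure-Haskell sketch of the coordinate mapping in the worked example (illustrative only; `boxToPixels` is a hypothetical helper assuming the simple `f * n` scaling used in the numbers above):

    ```haskell
    -- Normalized [y_min, x_min, y_max, x_max] to pixel corners.
    boxToPixels :: Int -> Int -> (Double, Double, Double, Double)
                -> ((Int, Int), (Int, Int))
    boxToPixels height width (yMin, xMin, yMax, xMax) =
      ( (scale yMin height, scale xMin width)
      , (scale yMax height, scale xMax width) )
      where scale f n = round (f * fromIntegral n)
    ```

    `boxToPixels 100 200 (0.1, 0.2, 0.5, 0.9)` gives `((10, 40), (50, 180))`, as above.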

    tensorArraySplit Source

    Arguments

    :: TensorType t 
    => Tensor v1 ByteString

    handle: The handle to a TensorArray.

    -> Tensor v2 t

    value: The concatenated tensor to write to the TensorArray.

    -> Tensor v3 Int64

    lengths: The vector of lengths, how to split the rows of value into the TensorArray.

    -> Tensor v4 Float

    flow_in: A float scalar that enforces proper chaining of operations.

    -> Tensor Value Float

    flow_out: A float scalar that enforces proper chaining of operations.

    Split the data from the input value into TensorArray elements.

    Assuming that lengths takes on values

    ```(n0, n1, ..., n(T-1))```

    and that value has shape

    ```((n0 + n1 + ... + n(T-1)) x d0 x d1 x ...)```,

    this splits values into a TensorArray with T tensors.

    TensorArray index t will be the subtensor of values with starting position

    ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```

    and having size

    ```nt x d0 x d1 x ...```

    split Source

    Arguments

    :: TensorType t 
    => Int64

    num_split: The number of ways to split. Must evenly divide `value.shape[split_dim]`.

    -> Tensor v1 Int32

    split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`.

    -> Tensor v2 t

    value: The tensor to split.

    -> [Tensor Value t]

    output: They are identically shaped tensors, whose shape matches that of value except along split_dim, where their sizes are `values.shape[split_dim] / num_split`.

    Splits a tensor into num_split tensors along one dimension.

    segmentMax Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    data

    -> Tensor v2 tindices

    segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated.

    -> Tensor Value t

    output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

    Computes the maximum along segments of a tensor.

    Read the section on Segmentation for an explanation of segments.

    Computes a tensor such that \(output_i = max_j(data_j)\) where max is over j such that `segment_ids[j] == i`.

    style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/SegmentMax.png" alt + /div

    abort :: ControlNode Source

    Raise an exception to abort the process when called.

    Returns nothing but an exception.

    sparseReorder Source

    Arguments

    :: TensorType t 
    => Tensor v1 Int64

    input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

    -> Tensor v2 t

    input_values: 1-D. N non-empty values corresponding to input_indices.

    -> Tensor v3 Int64

    input_shape: 1-D. Shape of the input SparseTensor.

    -> (Tensor Value Int64, Tensor Value t)

    (output_indices, output_values)

    • output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but in canonical row-major ordering.
    • output_values: 1-D. N non-empty values corresponding to output_indices.

    Reorders a SparseTensor into the canonical, row-major ordering.

    Note that by convention, all sparse ops preserve the canonical ordering along increasing dimension number. The only time ordering can be violated is during manual manipulation of the indices and values vectors to add entries.

    Reordering does not affect the shape of the SparseTensor.

    If the tensor has rank R and N non-empty values, input_indices has shape `[N, R]`, input_values has length N, and input_shape has length R.
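    Since the canonical row-major ordering is just lexicographic order on the index vectors, the reordering itself is a one-line pure-Haskell sketch (illustrative only):

    ```haskell
    import Data.List (sortOn)

    -- Row-major (lexicographic) reordering of (index vector, value) pairs.
    sparseReorder :: [([Int], a)] -> [([Int], a)]
    sparseReorder = sortOn fst
    ```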

    rsqrtGrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Computes the gradient for the rsqrt of x wrt its input.

    Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and dy is the corresponding input gradient.

    reverseSequence Source

    Arguments

    :: (TensorType t, TensorType tlen, OneOf `[Int32, Int64]` tlen) 
    => Int64

    seq_dim: The dimension which is partially reversed.

    -> Tensor v1 t

    input: The input to reverse.

    -> Tensor v2 tlen

    seq_lengths: 1-D with length `input.dims(batch_dim)` and `max(seq_lengths) < input.dims(seq_dim)`

    -> Tensor Value t

    output: The partially reversed input. It has the same shape as input.

    Reverses variable length slices.

    This op first slices input along the dimension batch_dim, and for each slice i, reverses the first `seq_lengths[i]` elements along the dimension seq_dim.

    The elements of seq_lengths must obey `seq_lengths[i] < input.dims[seq_dim]`, and seq_lengths must be a vector of length `input.dims[batch_dim]`.

    The output slice i along dimension batch_dim is then given by input slice i, with the first `seq_lengths[i]` slices along dimension seq_dim reversed.

    For example:

    ```prettyprint
    # Given this:
    batch_dim = 0
    seq_dim = 1
    input.dims = (4, 8, ...)
    seq_lengths = [7, 2, 3, 5]

    # then slices of input are reversed on seq_dim, but only up to seq_lengths:
    output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
    output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
    output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
    output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]

    # while entries past seq_lens are copied through:
    output[0, 7:, :, ...] = input[0, 7:, :, ...]
    output[1, 2:, :, ...] = input[1, 2:, :, ...]
    output[2, 3:, :, ...] = input[2, 3:, :, ...]
    output[3, 2:, :, ...] = input[3, 2:, :, ...]
    ```

    In contrast, if:

    ```prettyprint
    # Given this:
    batch_dim = 2
    seq_dim = 0
    input.dims = (8, ?, 4, ...)
    seq_lengths = [7, 2, 3, 5]

    # then slices of input are reversed on seq_dim, but only up to seq_lengths:
    output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
    output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
    output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
    output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]

    # while entries past seq_lens are copied through:
    output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
    output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
    output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
    output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
    ```
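    The per-slice rule is simple to sketch in pure Haskell for the common batch_dim = 0, seq_dim = 1 case (illustrative only):

    ```haskell
    -- Reverse the first seq_lengths[i] entries of row i; copy the rest through.
    reverseSequence :: [Int] -> [[a]] -> [[a]]
    reverseSequence seqLengths rows =
      [ reverse (take n row) ++ drop n row
      | (n, row) <- zip seqLengths rows ]
    ```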

    readerNumRecordsProduced Source

    Arguments

    :: Tensor v1 ByteString

    reader_handle: Handle to a Reader.

    -> Tensor Value Int64

    records_produced

    Returns the number of records this Reader has produced.

    This is the same as the number of ReaderRead executions that have succeeded.

    deserializeManySparse Source

    Arguments

    :: TensorType dtype 
    => Tensor v1 ByteString

    serialized_sparse: 2-D, The N serialized SparseTensor objects. Must have 3 columns.

    -> (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

    (sparse_indices, sparse_values, sparse_shape)

    • sparse_indices
    • sparse_values
    • sparse_shape

    Deserialize and concatenate SparseTensors from a serialized minibatch.

    The input serialized_sparse must be a string matrix of shape `[N x 3]` where N is the minibatch size and the rows correspond to packed outputs of SerializeSparse. The ranks of the original SparseTensor objects must all match. When the final SparseTensor is created, it has rank one higher than the ranks of the incoming SparseTensor objects (they have been concatenated along a new row dimension).

    The output SparseTensor object's shape values for all dimensions but the first are the max across the input SparseTensor objects' shape values for the corresponding dimensions. Its first shape value is N, the minibatch size.

    The input SparseTensor objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run SparseReorder to restore index ordering.

    For example, if the serialized input is a `[2 x 3]` matrix representing two original SparseTensor objects:

    index = [ 0]
            [10]
            [20]
    values = [1, 2, 3]
    shape = [50]

    and

    index = [ 2]
            [10]
    values = [4, 5]
    shape = [30]

    then the final deserialized SparseTensor will be:

    index = [0 0]
            [0 10]
            [0 20]
            [1 2]
            [1 10]
    values = [1, 2, 3, 4, 5]
    shape = [2 50]

    immutableConst Source

    Arguments

    :: TensorType dtype 
    => Tensor Value dtype

    tensor

    Returns immutable tensor from memory region.

    The current implementation memmaps the tensor from a file.

    minimum Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns the min of x and y (i.e. x < y ? x : y) element-wise.

    *NOTE*: Minimum supports broadcasting. More about broadcasting here

    initializeTableFromTextFile Source

    Arguments

    :: Int64

    key_index: Column index in a line to get the table key values from.

    -> Int64

    value_index: Column index that represents information of a line to get the table value values from.

    -> Tensor v1 ByteString

    table_handle: Handle to a table which will be initialized.

    -> Tensor v2 ByteString

    filename: Filename of a vocabulary text file.

    -> ControlNode 

    Initializes a table from a text file.

    It inserts one key-value pair into the table for each line of the file. The key and value are extracted from the whole line content, elements from the split line based on delimiter, or the line number (starting from zero). Where to extract the key and value from a line is specified by key_index and value_index, as sketched after the list below.

    • A value of -1 means use the line number (starting from zero); expects int64.
    • A value of -2 means use the whole line content; expects string.
    • A value >= 0 means use the index (starting at zero) of the split line based on delimiter.
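    A pure-Haskell sketch of these three conventions for one line of the file (illustrative only; `extractField` and `splitOn` are hypothetical helpers, and the delimiter is assumed to be a single character):

    ```haskell
    -- key_index / value_index selection for one line of the vocabulary file.
    extractField :: Int -> Char -> Int -> String -> String
    extractField lineNumber delimiter index line
      | index == -1 = show lineNumber   -- line number (starting from zero)
      | index == -2 = line              -- whole line content
      | otherwise   = splitOn delimiter line !! index
      where
        splitOn d = foldr step [""]
          where
            step c acc@(cur : rest)
              | c == d    = "" : acc
              | otherwise = (c : cur) : rest
    ```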

    diagPart Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t) 
    => Tensor v1 t

    input: Rank k tensor where k is 2, 4, or 6.

    -> Tensor Value t

    diagonal: The extracted diagonal.

    Returns the diagonal part of the tensor.

    This operation returns a tensor with the diagonal part of the input. The diagonal part is computed as follows:

    Assume input has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a tensor of rank k with dimensions `[D1,..., Dk]` where:

    `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.

    For example:

    ```prettyprint
    # input is [[1, 0, 0, 0]
                [0, 2, 0, 0]
                [0, 0, 3, 0]
                [0, 0, 0, 4]]

    tf.diag_part(input) ==> [1, 2, 3, 4]
    ```

    log Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes natural logarithm of x element-wise.

    I.e., \(y = log_e x\).

    tensorArrayScatter Source

    Arguments

    :: TensorType t 
    => Tensor v1 ByteString

    handle: The handle to a TensorArray.

    -> Tensor v2 Int32

    indices: The locations at which to write the tensor elements.

    -> Tensor v3 t

    value: The concatenated tensor to write to the TensorArray.

    -> Tensor v4 Float

    flow_in: A float scalar that enforces proper chaining of operations.

    -> Tensor Value Float

    flow_out: A float scalar that enforces proper chaining of operations.

    Scatter the data from the input value into specific TensorArray elements.

    indices must be a vector, its length must match the first dim of value.

    rank Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input

    -> Tensor Value Int32

    output

    Returns the rank of a tensor.

    This operation returns an integer representing the rank of input.

    For example:

    ```prettyprint
    # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    # shape of tensor t is [2, 2, 3]
    rank(t) ==> 3
    ```

    *Note*: The rank of a tensor is not the same as the rank of a matrix. The rank of a tensor is the number of indices required to uniquely select each element of the tensor. Rank is also known as "order", "degree", or "ndims."

    identity Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input

    -> Tensor Value t

    output

    Return a tensor with the same shape and contents as the input tensor or value.

    adjustContrastv2 Source

    Arguments

    :: Tensor v1 Float

    images: Images to adjust. At least 3-D.

    -> Tensor v2 Float

    contrast_factor: A float multiplier for adjusting contrast.

    -> Tensor Value Float

    output: The contrast-adjusted image or images.

    Adjust the contrast of one or more images.

    images is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as `[height, width, channels]`. The other dimensions only represent a collection of images, such as `[batch, height, width, channels]`.

    Contrast is adjusted independently for each channel of each image.

    For each channel, the Op first computes the mean of the image pixels in the channel and then adjusts each component of each pixel to `(x - mean) * contrast_factor + mean`.
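    That per-channel formula as a pure-Haskell sketch (illustrative only; one flat list stands in for one channel of one image):

    ```haskell
    -- (x - mean) * contrast_factor + mean over one channel of one image.
    adjustContrast :: Double -> [Double] -> [Double]
    adjustContrast factor channel =
      [ (x - mean) * factor + mean | x <- channel ]
      where mean = sum channel / fromIntegral (length channel)
    ```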

    sparseApplyProximalAdagrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    accum: Should be from a Variable().

    -> Tensor v3 t

    lr: Learning rate. Must be a scalar.

    -> Tensor v4 t

    l1: L1 regularization. Must be a scalar.

    -> Tensor v5 t

    l2: L2 regularization. Must be a scalar.

    -> Tensor v6 t

    grad: The gradient.

    -> Tensor v7 tindices

    indices: A vector of indices into the first dimension of var and accum.

    -> Tensor Value t

    out: Same as "var".

    Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.

    That is, for rows we have grad for, we update var and accum as follows:

    accum += grad * grad
    prox_v = var
    prox_v -= lr * grad * (1 / sqrt(accum))
    var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

    gather Source

    Arguments

    :: (TensorType tindices, OneOf `[Int32, Int64]` tindices, TensorType tparams) 
    => Tensor v1 tparams

    params

    -> Tensor v2 tindices

    indices

    -> Tensor Value tparams

    output

    Gather slices from params according to indices.

    indices must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

    # Scalar indices
    output[:, ..., :] = params[indices, :, ..., :]

    # Vector indices
    output[i, :, ..., :] = params[indices[i], :, ..., :]

    # Higher rank indices
    output[i, ..., j, :, ..., :] = params[indices[i, ..., j], :, ..., :]

    If indices is a permutation and `len(indices) == params.shape[0]` then this operation will permute params accordingly.

    style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/Gather.png" alt + /div

    isVariableInitialized Source

    Arguments

    :: TensorType dtype 
    => Tensor v1 dtype

    ref: Should be from a Variable node. May be uninitialized.

    -> Tensor Value Bool

    is_initialized

    Checks whether a tensor has been initialized.

    Outputs boolean scalar indicating whether the tensor has been initialized.

    concat Source

    Arguments

    :: TensorType t 
    => Tensor v1 Int32

    concat_dim: 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)).

    -> [Tensor v2 t]

    values: The N Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except concat_dim.

    -> Tensor Value t

    output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except in concat_dim where it has the sum of the sizes.

    Concatenates tensors along one dimension.

    randomUniformInt Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64]` t, TensorType tout, OneOf `[Int32, Int64]` tout) 
    => Tensor v1 t

    shape: The shape of the output tensor.

    -> Tensor v2 tout

    minval: 0-D. Inclusive lower bound on the generated integers.

    -> Tensor v3 tout

    maxval: 0-D. Exclusive upper bound on the generated integers.

    -> Tensor Value tout

    output: A tensor of the specified shape filled with uniform random integers.

    Outputs random integers from a uniform distribution.

    The generated values are uniform integers in the range `[minval, maxval)`. The lower bound minval is included in the range, while the upper bound maxval is excluded.

    The random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2^32` or `2^64`).
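    The source of this bias is the usual modulo fold, which a pure-Haskell sketch makes explicit (illustrative only; `toRange` is a hypothetical helper, not how the op is implemented):

    ```haskell
    -- Folding a raw random word into [minval, maxval) with mod. Unless the
    -- range divides the raw generator's period exactly (e.g. 2^32), the
    -- smallest few values are produced one extra time: the bias noted above.
    toRange :: Integer -> Integer -> Integer -> Integer
    toRange minval maxval raw = minval + raw `mod` (maxval - minval)
    ```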

    stopGradient Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input

    -> Tensor Value t

    output

    Stops gradient computation.

    When executed in a graph, this op outputs its input tensor as-is.

    When building ops to compute gradients, this op prevents the contribution of its inputs from being taken into account. Normally, the gradient generator adds ops to a graph to compute the derivatives of a specified loss by recursively finding the inputs that contributed to its computation. If you insert this op in the graph, its inputs are masked from the gradient generator; they are not taken into account for computing gradients.

    This is useful any time you want to compute a value with TensorFlow but need to pretend that the value was a constant. Some examples include:

    • The *EM* algorithm where the *M-step* should not involve backpropagation through the output of the *E-step*.
    • Contrastive divergence training of Boltzmann machines where, when differentiating the energy function, the training must not backpropagate through the graph that generated the samples from the model.
    • Adversarial training, where no backprop should happen through the adversarial example generation process.

    avgPool Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    value: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor Value t

    output: The average pooled output tensor.

    Performs average pooling on the input.

    Each entry in output is the mean of the corresponding size ksize window in value.

    wholeFileReader Source

    Arguments

    :: Tensor Value ByteString

    reader_handle: The handle to reference the Reader.

    A Reader that outputs the entire contents of a file as a value.

    To use, enqueue filenames in a Queue. The output of ReaderRead will be a filename (key) and the contents of that file (value).

    switch Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    data: The tensor to be forwarded to the appropriate output.

    -> Tensor v2 Bool

    pred: A scalar that specifies which output port will receive data.

    -> (Tensor Value t, Tensor Value t)

    (output_false, output_true)

    • output_false: If pred is false, data will be forwarded to this output.
    • output_true: If pred is true, data will be forwarded to this output.

    Forwards `data` to the output port determined by pred.

    If pred is true, the `data` input is forwarded to output_true. Otherwise, the data goes to output_false.

    See also RefSwitch and Merge.

    randomStandardNormal Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64]` t, TensorType dtype, OneOf `[Word16, Double, Float]` dtype) 
    => Tensor v1 t

    shape: The shape of the output tensor.

    -> Tensor Value dtype

    output: A tensor of the specified shape filled with random normal values.

    Outputs random values from a normal distribution.

    The generated values will have mean 0 and standard deviation 1.

    sigmoid Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes sigmoid of x element-wise.

    Specifically, `y = 1 / (1 + exp(-x))`.

    sampleDistortedBoundingBox Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word8]` t) 
    => Tensor v1 t

    image_size: 1-D, containing `[height, width, channels]`.

    -> Tensor v2 Float

    bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image.

    -> (Tensor Value t, Tensor Value t, Tensor Value Float)

    (begin, size, bboxes)

    • begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
    • size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
    • bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`.

    Generate a single randomly distorted bounding box for an image.

    Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an image_size, bounding_boxes and a series of constraints.

    The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: begin, size and bboxes. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_box` to visualize what the bounding box looks like.

    Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.

    For example,

    # Generate a single distorted bounding box.
    begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        bounding_boxes=bounding_boxes)

    # Draw the bounding box in an image summary.
    image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                                  bbox_for_draw)
    tf.image_summary('images_with_box', image_with_box)

    # Employ the bounding box to distort the image.
    distorted_image = tf.slice(image, begin, size)

    Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If use_image_if_no_bounding_boxes is false and no bounding boxes are supplied, an error is raised.

    greater Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value Bool

    z

    Returns the truth value of (x > y) element-wise.

    *NOTE*: Greater supports broadcasting. More about broadcasting here

    refNextIteration Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    data: The tensor to be made available to the next iteration.

    -> Tensor Value t

    output: The same tensor as `data`.

    Makes its input available to the next iteration.

    spaceToDepth Source

    Arguments

    :: TensorType t 
    => Int64

    block_size: The size of the spatial block.

    -> Tensor v1 t

    input

    -> Tensor Value t

    output

    SpaceToDepth for tensors of type T.

    Rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension. The attr block_size indicates the input block size and how the data is moved.

    • Non-overlapping blocks of size `block_size x block_size` are rearranged into depth at each location.
    • The depth of the output tensor is `input_depth * block_size * block_size`.
    • The input tensor's height and width must be divisible by block_size.

    That is, assuming the input is in the shape `[batch, height, width, depth]`, the shape of the output will be `[batch, height/block_size, width/block_size, depth*block_size*block_size]`

    This operation requires that the input tensor be of rank 4, and that block_size be >=1 and a divisor of both the input height and width.

    This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models.

    For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:

    ```prettyprint
    x = [[[[1], [2]],
          [[3], [4]]]]
    ```

    This operation will output a tensor of shape `[1, 1, 1, 4]`:

    ```prettyprint
    [[[[1, 2, 3, 4]]]]
    ```

    Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, the corresponding output will have a single element (i.e. width and height are both 1) and will have a depth of 4 channels (1 * block_size * block_size). The output element shape is `[1, 1, 4]`.

    For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.

    ```prettyprint
    x = [[[[1, 2, 3], [4, 5, 6]],
          [[7, 8, 9], [10, 11, 12]]]]
    ```

    This operation, for block_size of 2, will return the following tensor of shape `[1, 1, 1, 12]`

    ```prettyprint
    [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
    ```

    Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:

    ```prettyprint
    x = [[[[1], [2], [5], [6]],
          [[3], [4], [7], [8]],
          [[9], [10], [13], [14]],
          [[11], [12], [15], [16]]]]
    ```

    the operator will return the following tensor of shape `[1 2 2 4]`:

    ```prettyprint
    x = [[[[1, 2, 3, 4],
           [5, 6, 7, 8]],
          [[9, 10, 11, 12],
           [13, 14, 15, 16]]]]
    ```
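    The block rearrangement itself can be sketched in pure Haskell for a single image (illustrative only; `spaceToDepth` here is a hypothetical helper over nested lists, not the generated binding):

    ```haskell
    -- One NHWC image as [height][width][depth]; block_size must divide both
    -- height and width. Each b x b spatial block is concatenated into depth.
    spaceToDepth :: Int -> [[[a]]] -> [[[a]]]
    spaceToDepth b img =
      [ [ concat [ img !! (i * b + di) !! (j * b + dj)
                 | di <- [0 .. b - 1], dj <- [0 .. b - 1] ]
        | j <- [0 .. w `div` b - 1] ]
      | i <- [0 .. h `div` b - 1] ]
      where
        h = length img
        w = length (head img)
    ```

    `spaceToDepth 2 [[[1], [2]], [[3], [4]]]` gives `[[[1, 2, 3, 4]]]`, matching the first example.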

    controlTrigger :: ControlNode Source

    Does nothing. Serves as a control trigger for scheduling.

    Only useful as a placeholder for control edges.

    scatterDiv Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    ref: Should be from a Variable node.

    -> Tensor v2 tindices

    indices: A tensor of indices into the first dimension of ref.

    -> Tensor v3 t

    updates: A tensor of values that ref is divided by.

    -> Tensor Value t

    output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

    Divides a variable reference by sparse updates.

    This operation computes

    # Scalar indices
    ref[indices, ...] /= updates[...]

    # Vector indices (for each i)
    ref[indices[i], ...] /= updates[i, ...]

    # High rank indices (for each i, ..., j)
    ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]

    This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

    Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions divide.

    Requires `updates.shape = indices.shape + ref.shape[1:]`.
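    A pure-Haskell sketch of the vector-indices case, including the duplicate-index behaviour (illustrative only, and written sequentially rather than in-place):

    ```haskell
    -- Each (index, update) pair divides in turn, so duplicate indices compose.
    scatterDiv :: [Double] -> [(Int, Double)] -> [Double]
    scatterDiv ref updates = foldl apply ref updates
      where
        apply acc (i, u) =
          [ if k == i then v / u else v | (k, v) <- zip [0 ..] acc ]
    ```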

    copy Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input: Input tensor.

    -> Tensor Value t

    output: Output tensor, deep-copied from input.

    Copy Op.

    Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the device on which the tensor is allocated.

    Unlike the CopyHost Op, this op does not have HostMemory constraint on its input or output.

    cropAndResizeGradBoxes Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Float

    grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

    -> Tensor v2 t

    image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both image_height and image_width need to be positive.

    -> Tensor v3 Float

    boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

    -> Tensor v4 Int32

    box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

    -> Tensor Value Float

    output: A 2-D tensor of shape `[num_boxes, 4]`.

    Computes the gradient of the crop_and_resize op wrt the input boxes tensor.

    sparseSegmentMean Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    data

    -> Tensor v2 tidx

    indices: A 1-D tensor. Has same rank as segment_ids.

    -> Tensor v3 Int32

    segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

    -> Tensor Value t

    output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

    Computes the mean along sparse segments of a tensor.

    Read the section on Segmentation for an explanation of segments.

    Like SegmentMean, but segment_ids can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by indices.

    assign Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    ref: Should be from a Variable node. May be uninitialized.

    -> Tensor v2 t

    value: The value to be assigned to the variable.

    -> Tensor Value t

    output_ref: = Same as "ref". Returned as a convenience for operations that want + to use the new value after the variable has been reset.

    Update ref by assigning value to it.

    This operation outputs "ref" after the assignment is done. + This makes it easier to chain operations that need to use the reset value.

    restore Source

    Arguments

    :: TensorType dt 
    => Tensor v1 ByteString

    file_pattern: Must have a single element. The pattern of the files from which we read the tensor.

    -> Tensor v2 ByteString

    tensor_name: Must have a single element. The name of the tensor to be restored.

    -> Tensor Value dt

    tensor: The restored tensor.

    Restores a tensor from checkpoint files.

    Reads a tensor stored in one or several files. If there are several files (for instance because a tensor was saved as slices), file_pattern may contain wildcard symbols (* and ?) in the filename portion only, not in the directory portion.

    If a file_pattern matches several files, preferred_shard can be used to hint in which file the requested tensor is likely to be found. This op will first open the file at index preferred_shard in the list of matching files and try to restore tensors from that file. Only if some tensors or tensor slices are not found in that first file, then the Op opens all the files. Setting preferred_shard to match the value passed as the shard input of a matching Save Op may speed up Restore. This attribute only affects performance, not correctness. The default value -1 means files are processed in order.

    See also RestoreSlice.

    maxPoolGradWithArgmax Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Float]` t, TensorType targmax, OneOf `[Int32, Int64]` targmax) 
    => Tensor v1 t

    input: The original input.

    -> Tensor v2 t

    grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of max_pool.

    -> Tensor v3 targmax

    argmax: The indices of the maximum values chosen for each output of max_pool.

    -> Tensor Value t

    output: Gradients w.r.t. the input of max_pool.

    Computes gradients of the maxpooling function.

    checkNumerics Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    tensor

    -> Tensor Value t

    output

    Checks a tensor for NaN and Inf values.

    When run, reports an InvalidArgument error if tensor has any values that are not a number (NaN) or infinity (Inf). Otherwise, passes tensor as-is.

    zerosLike Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    x: a tensor of type T.

    -> Tensor Value t

    y: a tensor of the same shape and type as x but filled with zeros.

    Returns a tensor of zeros with the same shape and type as x.

    readFile Source

    Arguments

    :: Tensor v1 ByteString

    filename

    -> Tensor Value ByteString

    contents

    Reads and outputs the entire contents of the input filename.

    transpose Source

    Arguments

    :: (TensorType t, TensorType tperm, OneOf `[Int32, Int64]` tperm) 
    => Tensor v1 t

    x

    -> Tensor v2 tperm

    perm

    -> Tensor Value t

    y

    Shuffle dimensions of x according to a permutation.

    The output y has the same rank as x. The shapes of x and y satisfy: `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
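
    A minimal Haskell sketch of this op (not part of the generated docs; the constant/vector helpers are assumed to come from the tensorflow-ops package's TensorFlow.Ops module, and run/runSession from TensorFlow.Session):

    ```haskell
    import Data.Int (Int32)
    import qualified Data.Vector as V
    import TensorFlow.Types (Shape (..))
    import qualified TensorFlow.GenOps.Core as CoreOps
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF

    main :: IO ()
    main = do
      -- x has shape [2, 3]; transposing with perm = [1, 0] yields shape [3, 2].
      result <- TF.runSession $ do
        let x    = TF.constant (Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]
            perm = TF.vector [1, 0 :: Int32]
        TF.run (CoreOps.transpose x perm)
      print (result :: V.Vector Float)  -- fromList [1.0,4.0,2.0,5.0,3.0,6.0]
    ```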

    parseTensor Source

    Arguments

    :: TensorType out_type 
    => Tensor v1 ByteString

    serialized: A scalar string containing a serialized TensorProto proto.

    -> Tensor Value out_type

    output: A Tensor of type out_type.

    Transforms a serialized tensorflow.TensorProto proto into a Tensor.

    acos Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes acos of x element-wise.

    bitcast Source

    Bitcasts a tensor from one type to another without copying data.

    Given a tensor input, this operation returns a tensor that has the same buffer data as input with datatype `type`.

    If the input datatype T is larger than the output datatype `type` then the shape changes from [...] to [..., sizeof(T)/sizeof(`type`)].

    If T is smaller than `type`, the operator requires that the rightmost dimension be equal to sizeof(`type`)/sizeof(T). The shape then goes from [..., sizeof(`type`)/sizeof(T)] to [...].

    *NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results.

    lookupTableImport Source

    Arguments

    :: (TensorType tin, TensorType tout) 
    => Tensor v1 ByteString

    table_handle: Handle to the table.

    -> Tensor v2 tin

    keys: Any shape. Keys to look up.

    -> Tensor v3 tout

    values: Values to associate with keys.

    -> ControlNode 

    Replaces the contents of the table with the specified keys and values.

    The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

    biasAddGrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    out_backprop: Any number of dimensions.

    -> Tensor Value t

    output: 1-D with size the feature dimension of out_backprop.

    The backward operation for BiasAdd on the "bias" tensor.

    It accumulates all the values from out_backprop into the feature dimension. For NHWC data format, the feature dimension is the last. For NCHW data format, the feature dimension is the third-to-last.

    batchSelfAdjointEig Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input

    -> Tensor Value t

    output

    prod Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    input: The tensor to reduce.

    -> Tensor v2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Value t

    output: The reduced tensor.

    Computes the product of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.
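
    A minimal sketch of a reduction (same assumptions as the transpose sketch above): reducing a `[2, 2]` tensor over dimension 0 multiplies down each column.

    ```haskell
    import Data.Int (Int32)
    import qualified Data.Vector as V
    import TensorFlow.Types (Shape (..))
    import qualified TensorFlow.GenOps.Core as CoreOps
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF

    main :: IO ()
    main = do
      -- input is [[1, 2], [3, 4]]; reducing dimension 0 gives [1*3, 2*4].
      result <- TF.runSession $ do
        let input = TF.constant (Shape [2, 2]) [1, 2, 3, 4 :: Float]
            ri    = TF.vector [0 :: Int32]
        TF.run (CoreOps.prod input ri)
      print (result :: V.Vector Float)  -- fromList [3.0,8.0]
    ```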

    resizeBilinear Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    images: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor v2 Int32

    size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

    -> Tensor Value Float

    resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

    Resize images to size using bilinear interpolation.

    Input images can be of different types but output images are always float.

    tensorArrayUnpack Source

    Arguments

    :: TensorType t 
    => Tensor v1 ByteString

    handle: The handle to a TensorArray.

    -> Tensor v2 t

    value: The concatenated tensor to write to the TensorArray.

    -> Tensor v3 Float

    flow_in: A float scalar that enforces proper chaining of operations.

    -> Tensor Value Float

    flow_out: A float scalar that enforces proper chaining of operations.

    Unpack the data from the input value into TensorArray elements.

    **WARNING: This op is deprecated.**

    Instead of this op, use TensorArrayScatter with `indices = RangeOp(0, SizeOp(value)[0])`.

    batchMatrixDeterminant Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input

    -> Tensor Value t

    output

    sum Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    input: The tensor to reduce.

    -> Tensor v2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Value t

    output: The reduced tensor.

    Computes the sum of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

    iFFT2D Source

    Arguments

    :: Tensor v1 (Complex Float)

    input: A complex64 tensor.

    -> Tensor Value (Complex Float)

    output: A complex64 tensor of the same shape as input. The inner-most 2 dimensions of input are replaced with their inverse 2D Fourier Transform.

    Compute the inverse 2-dimensional discrete Fourier Transform over the inner-most 2 dimensions of input.

    fill Source

    Arguments

    :: TensorType t 
    => Tensor v1 Int32

    dims: 1-D. Represents the shape of the output tensor.

    -> Tensor v2 t

    value: 0-D (scalar). Value to fill the returned tensor.

    -> Tensor Value t

    output

    Creates a tensor filled with a scalar value.

    This operation creates a tensor of shape dims and fills it with value.

    For example:

    ```prettyprint
    # Output tensor has shape [2, 3].
    fill([2, 3], 9) ==> [[9, 9, 9]
                         [9, 9, 9]]
    ```
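
    The same example as a Haskell sketch (TF.vector/TF.scalar assumed to come from the tensorflow-ops package):

    ```haskell
    import Data.Int (Int32)
    import qualified Data.Vector as V
    import qualified TensorFlow.GenOps.Core as CoreOps
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF

    main :: IO ()
    main = do
      -- fill([2, 3], 9): a [2, 3] tensor in which every element is 9.
      result <- TF.runSession $
        TF.run (CoreOps.fill (TF.vector [2, 3 :: Int32]) (TF.scalar (9 :: Float)))
      print (result :: V.Vector Float)  -- fromList [9.0,9.0,9.0,9.0,9.0,9.0]
    ```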

    fixedUnigramCandidateSampler Source

    Arguments

    :: Int64

    num_sampled: Number of candidates to randomly sample per batch.

    -> Int64

    num_true: Number of true labels per context.

    -> Int64

    range_max: The sampler will sample integers from the interval [0, range_max).

    -> Bool

    unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

    -> Tensor v1 Int64

    true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

    -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

    (sampled_candidates, true_expected_count, sampled_expected_count)

    • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
    • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
    • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

    Generates labels for candidate sampling with a learned unigram distribution.

    A unigram sampler could use a fixed unigram distribution read from a file or passed in as an in-memory array instead of building up the distribution from data on the fly. There is also an option to skew the distribution by applying a distortion power to the weights.

    The vocabulary file should be in CSV-like format, with the last field being the weight associated with the word.

    For each batch, this op picks a single set of sampled candidate labels.

    The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

    dilation2D Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    input: 4-D with shape `[batch, in_height, in_width, depth]`.

    -> Tensor v2 t

    filter: 3-D with shape `[filter_height, filter_width, depth]`.

    -> Tensor Value t

    output: 4-D with shape `[batch, out_height, out_width, depth]`.

    Computes the grayscale dilation of 4-D input and 3-D filter tensors.

    The input tensor has shape `[batch, in_height, in_width, depth]` and the filter tensor has shape `[filter_height, filter_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The output tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the padding algorithm. We currently only support the default NHWC data_format.

    In detail, the grayscale morphological 2-D dilation is the max-sum correlation (for consistency with conv2d, we use unmirrored filters):

        output[b, y, x, c] =
            max_{dy, dx} input[b,
                               strides[1] * y + rates[1] * dy,
                               strides[2] * x + rates[2] * dx,
                               c] +
                         filter[dy, dx, c]

    Max-pooling is a special case when the filter has size equal to the pooling kernel size and contains all zeros.

    Note on duality: The dilation of input by the filter is equal to the negation of the erosion of `-input` by the reflected filter.

    polygamma Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    a

    -> Tensor v2 t

    x

    -> Tensor Value t

    z

    Compute the polygamma function \(\psi^{(n)}(x)\).

    The polygamma function is defined as:

    ```
    \psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)
    ```

    where \(\psi(x)\) is the digamma function.

    refIdentity Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input

    -> Tensor Value t

    output

    Return the same ref tensor as the input ref tensor.

    encodePng Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Word8]` t) 
    => Tensor v1 t

    image: 3-D with shape `[height, width, channels]`.

    -> Tensor Value ByteString

    contents: 0-D. PNG-encoded image.

    PNG-encode an image.

    image is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` where channels is:

    • 1: for grayscale.
    • 2: for grayscale + alpha.
    • 3: for RGB.
    • 4: for RGBA.

    The ZLIB compression level, compression, can be -1 for the PNG-encoder default or a value from 0 to 9. 9 is the highest compression level, generating the smallest output, but is slower.

    lookupTableInsert Source

    Arguments

    :: (TensorType tin, TensorType tout) 
    => Tensor v1 ByteString

    table_handle: Handle to the table.

    -> Tensor v2 tin

    keys: Any shape. Keys to look up.

    -> Tensor v3 tout

    values: Values to associate with keys.

    -> ControlNode 

    Updates the table to associate keys with values.

    The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

    batchIFFT2D Source

    Arguments

    :: Tensor v1 (Complex Float)

    input

    -> Tensor Value (Complex Float)

    output

    uniqueWithCounts Source

    Arguments

    :: (TensorType t, TensorType out_idx, OneOf `[Int32, Int64]` out_idx) 
    => Tensor v1 t

    x: 1-D.

    -> (Tensor Value t, Tensor Value out_idx, Tensor Value out_idx)

    (y, idx, count)

    • y: 1-D.
    • idx: 1-D.
    • count: 1-D.

    Finds unique elements in a 1-D tensor.

    This operation returns a tensor y containing all of the unique elements of x sorted in the same order that they occur in x. This operation also returns a tensor idx the same size as x that contains the index of each value of x in the unique output y. Finally, it returns a third tensor count that contains the count of each element of y in x. In other words:

    `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

    For example:

    ```prettyprint
    # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8]
    y, idx, count = unique_with_counts(x)
    y ==> [1, 2, 4, 7, 8]
    idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
    count ==> [2, 1, 3, 1, 2]
    ```
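
    The same computation as a Haskell sketch (TF.vector assumed from tensorflow-ops; fetching the triple assumes the tuple Fetchable instances from TensorFlow.Nodes):

    ```haskell
    import Data.Int (Int32)
    import qualified Data.Vector as V
    import qualified TensorFlow.GenOps.Core as CoreOps
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF

    main :: IO ()
    main = do
      (y, idx, count) <- TF.runSession $
        TF.run (CoreOps.uniqueWithCounts
                  (TF.vector [1, 1, 2, 4, 4, 4, 7, 8, 8 :: Float]))
      print (y     :: V.Vector Float)  -- fromList [1.0,2.0,4.0,7.0,8.0]
      print (idx   :: V.Vector Int32)  -- fromList [0,0,1,2,2,2,3,4,4]
      print (count :: V.Vector Int32)  -- fromList [2,1,3,1,2]
    ```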

    gatherNd Source

    Arguments

    :: (TensorType tindices, OneOf `[Int32, Int64]` tindices, TensorType tparams) 
    => Tensor v1 tparams

    params: `M-D`. The tensor from which to gather values.

    -> Tensor v2 tindices

    indices: `(N+1)-D`. Index tensor having shape `[d_0, ..., d_N, R]`.

    -> Tensor Value tparams

    output: `(N+M-R)-D`. Values from params gathered from indices given by indices.

    Gather values or slices from params according to indices.

    params is a Tensor of rank R and indices is a Tensor of rank M.

    indices must be an integer tensor, containing indices into params. It must be shape `[d_0, ..., d_N, R]` where `0 < R <= M`.

    The innermost dimension of indices (with length R) corresponds to indices into elements (if `R = M`) or slices (if `R < M`) along the Nth dimension of params.

    Produces an output tensor with shape `[d_0, ..., d_{n-1}, params.shape[R], ..., params.shape[M-1]]`.

    Some examples below.

    Simple indexing into a matrix:

        indices = [[0, 0], [1, 1]]
        params = [[a, b], [c, d]]
        output = [a, d]

    Slice indexing into a matrix:

        indices = [[1], [0]]
        params = [[a, b], [c, d]]
        output = [[c, d], [a, b]]

    Indexing into a 3-tensor:

        indices = [[1]]
        params = [[[a0, b0], [c0, d0]],
                  [[a1, b1], [c1, d1]]]
        output = [[[a1, b1], [c1, d1]]]

        indices = [[0, 1], [1, 0]]
        params = [[[a0, b0], [c0, d0]],
                  [[a1, b1], [c1, d1]]]
        output = [[c0, d0], [a1, b1]]

        indices = [[0, 0, 1], [1, 0, 1]]
        params = [[[a0, b0], [c0, d0]],
                  [[a1, b1], [c1, d1]]]
        output = [b0, b1]

    Batched indexing into a matrix:

        indices = [[[0, 0]], [[0, 1]]]
        params = [[a, b], [c, d]]
        output = [[a], [b]]

    Batched slice indexing into a matrix:

        indices = [[[1]], [[0]]]
        params = [[a, b], [c, d]]
        output = [[[c, d]], [[a, b]]]

    Batched indexing into a 3-tensor:

        indices = [[[1]], [[0]]]
        params = [[[a0, b0], [c0, d0]],
                  [[a1, b1], [c1, d1]]]
        output = [[[[a1, b1], [c1, d1]]],
                  [[[a0, b0], [c0, d0]]]]

        indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
        params = [[[a0, b0], [c0, d0]],
                  [[a1, b1], [c1, d1]]]
        output = [[[c0, d0], [a1, b1]],
                  [[a0, b0], [c1, d1]]]

        indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
        params = [[[a0, b0], [c0, d0]],
                  [[a1, b1], [c1, d1]]]
        output = [[b0, b1], [d0, c1]]
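
    The first example ("simple indexing into a matrix") as a Haskell sketch, with numeric values standing in for a..d (constant helper assumed from tensorflow-ops):

    ```haskell
    import Data.Int (Int32)
    import qualified Data.Vector as V
    import TensorFlow.Types (Shape (..))
    import qualified TensorFlow.GenOps.Core as CoreOps
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF

    main :: IO ()
    main = do
      -- params = [[1, 2], [3, 4]]; indices = [[0, 0], [1, 1]] gathers the
      -- elements at positions (0, 0) and (1, 1).
      result <- TF.runSession $ do
        let params  = TF.constant (Shape [2, 2]) [1, 2, 3, 4 :: Float]
            indices = TF.constant (Shape [2, 2]) [0, 0, 1, 1 :: Int32]
        TF.run (CoreOps.gatherNd params indices)
      print (result :: V.Vector Float)  -- fromList [1.0,4.0]
    ```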

    tensorArrayRead Source

    Arguments

    :: TensorType dtype 
    => Tensor v1 ByteString

    handle: The handle to a TensorArray.

    -> Tensor v2 Int32

    index

    -> Tensor v3 Float

    flow_in: A float scalar that enforces proper chaining of operations.

    -> Tensor Value dtype

    value: The tensor that is read from the TensorArray.

    Read an element from the TensorArray into output value.

    readerReadUpTo Source

    Arguments

    :: Tensor v1 ByteString

    reader_handle: Handle to a Reader.

    -> Tensor v2 ByteString

    queue_handle: Handle to a Queue, with string work items.

    -> Tensor v3 Int64

    num_records: number of records to read from Reader.

    -> (Tensor Value ByteString, Tensor Value ByteString)

    (keys, values)

    • keys: A 1-D tensor.
    • values: A 1-D tensor.

    Returns up to num_records (key, value) pairs produced by a Reader.

    Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). It may return less than num_records even before the last batch.

    betainc Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    a

    -> Tensor v2 t

    b

    -> Tensor v3 t

    x

    -> Tensor Value t

    z

    Compute the regularized incomplete beta integral \(I_x(a, b)\).

    The regularized incomplete beta integral is defined as:

    ```
    I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}
    ```

    where

    ```
    B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt
    ```

    is the incomplete beta function and \(B(a, b)\) is the *complete* beta function.

    batchMatrixBandPart Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input

    -> Tensor v2 Int64

    num_lower

    -> Tensor v3 Int64

    num_upper

    -> Tensor Value t

    band

    depthwiseConv2dNativeBackpropInput Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 Int32

    input_sizes: An integer vector representing the shape of input, where input is a 4-D `[batch, height, width, channels]` tensor.

    -> Tensor v2 t

    filter: 4-D with shape `[filter_height, filter_width, in_channels, depthwise_multiplier]`.

    -> Tensor v3 t

    out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

    -> Tensor Value t

    output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution.

    Computes the gradients of depthwise convolution with respect to the input.

    refSelect Source

    Arguments

    :: TensorType t 
    => Tensor v1 Int32

    index: A scalar that determines the input that gets selected.

    -> [Tensor v2 t]

    inputs: A list of ref tensors, one of which will be forwarded to output.

    -> Tensor Value t

    output: The forwarded tensor.

    Forwards the indexth element of inputs to output.

    exit Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    data: The tensor to be made available to the parent frame.

    -> Tensor Value t

    output: The same tensor as `data`.

    Exits the current frame to its parent frame.

    Exit makes its input `data` available to the parent frame.

    lookupTableFind Source

    Arguments

    :: (TensorType tin, TensorType tout) 
    => Tensor v1 ByteString

    table_handle: Handle to the table.

    -> Tensor v2 tin

    keys: Any shape. Keys to look up.

    -> Tensor v3 tout

    default_value

    -> Tensor Value tout

    values: Same shape as keys. Values found in the table, or default_values for missing keys.

    Looks up keys in a table, outputs the corresponding values.

    The tensor keys must be of the same type as the keys of the table. The output values is of the type of the table values.

    The scalar default_value is the value output for keys not present in the table. It must also be of the same type as the table values.

    squeeze Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input: The input to squeeze.

    -> Tensor Value t

    output: Contains the same data as input, but has one or more dimensions of size 1 removed.

    Removes dimensions of size 1 from the shape of a tensor.

    Given a tensor input, this operation returns a tensor of the same type with all dimensions of size 1 removed. If you don't want to remove all size 1 dimensions, you can remove specific size 1 dimensions by specifying squeeze_dims.

    For example:

    ```prettyprint
    # t is a tensor of shape [1, 2, 1, 3, 1, 1]
    shape(squeeze(t)) ==> [2, 3]
    ```

    Or, to remove specific size 1 dimensions:

    ```prettyprint
    # t is a tensor of shape [1, 2, 1, 3, 1, 1]
    shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
    ```
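
    A Haskell sketch of the first example, using the generated shape op to inspect the result (constant helper assumed from tensorflow-ops; squeeze_dims is left at its default, so all size-1 dimensions are removed):

    ```haskell
    import Data.Int (Int32)
    import qualified Data.Vector as V
    import TensorFlow.Types (Shape (..))
    import qualified TensorFlow.GenOps.Core as CoreOps
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF

    main :: IO ()
    main = do
      -- t has shape [1, 2, 1, 3, 1, 1]; squeezing removes every size-1 dimension.
      result <- TF.runSession $ do
        let t = TF.constant (Shape [1, 2, 1, 3, 1, 1]) [1 .. 6 :: Float]
        TF.run (CoreOps.shape (CoreOps.squeeze t))
      print (result :: V.Vector Int32)  -- fromList [2,3]
    ```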

    mean Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    input: The tensor to reduce.

    -> Tensor v2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Value t

    output: The reduced tensor.

    Computes the mean of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

    spaceToBatchND Source

    Arguments

    :: (TensorType t, TensorType tblock_shape, OneOf `[Int32, Int64]` tblock_shape, TensorType tpaddings, OneOf `[Int32, Int64]` tpaddings) 
    => Tensor v1 t

    input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, where spatial_shape has M dimensions.

    -> Tensor v2 tblock_shape

    block_shape: 1-D with shape `[M]`, all values must be >= 1.

    -> Tensor v3 tpaddings

    paddings: 2-D with shape `[M, 2]`, all values must be >= 0. `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension `i + 1`, which corresponds to spatial dimension i. It is required that `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.

    This operation is equivalent to the following steps:

    1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input according to paddings to produce padded of shape padded_shape.
    2. Reshape padded to reshaped_padded of shape:

           [batch] +
           [padded_shape[1] / block_shape[0],
            block_shape[0],
            ...,
            padded_shape[M] / block_shape[M-1],
            block_shape[M-1]] +
           remaining_shape

    3. Permute dimensions of reshaped_padded to produce permuted_reshaped_padded of shape:

           block_shape +
           [batch] +
           [padded_shape[1] / block_shape[0],
            ...,
            padded_shape[M] / block_shape[M-1]] +
           remaining_shape

    4. Reshape permuted_reshaped_padded to flatten block_shape into the batch dimension, producing an output tensor of shape:

           [batch * prod(block_shape)] +
           [padded_shape[1] / block_shape[0],
            ...,
            padded_shape[M] / block_shape[M-1]] +
           remaining_shape

    Some examples:

    1. For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and `paddings = [[0, 0], [0, 0]]`:

    ```prettyprint
    x = [[[[1], [2]], [[3], [4]]]]
    ```

    The output tensor has shape `[4, 1, 1, 1]` and value:

    ```prettyprint
    [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
    ```

    2. For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and `paddings = [[0, 0], [0, 0]]`:

    ```prettyprint
    x = [[[[1, 2, 3], [4, 5, 6]],
          [[7, 8, 9], [10, 11, 12]]]]
    ```

    The output tensor has shape `[4, 1, 1, 3]` and value:

    ```prettyprint
    [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
    ```

    3. For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and `paddings = [[0, 0], [0, 0]]`:

    ```prettyprint
    x = [[[[1], [2], [3], [4]],
          [[5], [6], [7], [8]],
          [[9], [10], [11], [12]],
          [[13], [14], [15], [16]]]]
    ```

    The output tensor has shape `[4, 2, 2, 1]` and value:

    ```prettyprint
    x = [[[[1], [3]], [[9], [11]]],
         [[[2], [4]], [[10], [12]]],
         [[[5], [7]], [[13], [15]]],
         [[[6], [8]], [[14], [16]]]]
    ```

    4. For the following input of shape `[2, 2, 4, 1]`, `block_shape = [2, 2]`, and `paddings = [[0, 0], [2, 0]]`:

    ```prettyprint
    x = [[[[1], [2], [3], [4]],
          [[5], [6], [7], [8]]],
         [[[9], [10], [11], [12]],
          [[13], [14], [15], [16]]]]
    ```

    The output tensor has shape `[8, 1, 3, 1]` and value:

    ```prettyprint
    x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
         [[[0], [2], [4]]], [[[0], [10], [12]]],
         [[[0], [5], [7]]], [[[0], [13], [15]]],
         [[[0], [6], [8]]], [[[0], [14], [16]]]]
    ```

    Among others, this operation is useful for reducing atrous convolution into regular convolution.

    -> Tensor Value t

    output

    SpaceToBatch for N-D tensors of type T.

    This operation divides "spatial" dimensions `[1, ..., M]` of the input into a grid of blocks of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that in the output, the spatial dimensions `[1, ..., M]` correspond to the position within the grid, and the batch dimension combines both the position within a spatial block and the original batch position. Prior to division into blocks, the spatial dimensions of the input are optionally zero padded according to paddings. See below for a precise description.

    spaceToBatch Source

    Arguments

    :: (TensorType t, TensorType tpaddings, OneOf `[Int32, Int64]` tpaddings) 
    => Int64

    block_size

    -> Tensor v1 t

    input: 4-D with shape `[batch, height, width, depth]`.

    -> Tensor v2 tpaddings

    paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies the padding of the input with zeros across the spatial dimensions as follows:

    paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]

    The effective spatial dimensions of the zero-padded input tensor will be:

    height_pad = pad_top + height + pad_bottom
    width_pad = pad_left + width + pad_right

    The attr block_size must be greater than one. It indicates the block size.

    • Non-overlapping blocks of size `block_size x block_size` in the height and width dimensions are rearranged into the batch dimension at each location.
    • The batch of the output tensor is `batch * block_size * block_size`.
    • Both height_pad and width_pad must be divisible by block_size.

    The shape of the output will be:

        [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
         depth]

    Some examples:

    1. For the following input of shape `[1, 2, 2, 1]` and block_size of 2:

    ```prettyprint
    x = [[[[1], [2]], [[3], [4]]]]
    ```

    The output tensor has shape `[4, 1, 1, 1]` and value:

    ```prettyprint
    [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
    ```

    2. For the following input of shape `[1, 2, 2, 3]` and block_size of 2:

    ```prettyprint
    x = [[[[1, 2, 3], [4, 5, 6]],
          [[7, 8, 9], [10, 11, 12]]]]
    ```

    The output tensor has shape `[4, 1, 1, 3]` and value:

    ```prettyprint
    [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
    ```

    3. For the following input of shape `[1, 4, 4, 1]` and block_size of 2:

    ```prettyprint
    x = [[[[1], [2], [3], [4]],
          [[5], [6], [7], [8]],
          [[9], [10], [11], [12]],
          [[13], [14], [15], [16]]]]
    ```

    The output tensor has shape `[4, 2, 2, 1]` and value:

    ```prettyprint
    x = [[[[1], [3]], [[9], [11]]],
         [[[2], [4]], [[10], [12]]],
         [[[5], [7]], [[13], [15]]],
         [[[6], [8]], [[14], [16]]]]
    ```

    4. For the following input of shape `[2, 2, 4, 1]` and block_size of 2:

    ```prettyprint
    x = [[[[1], [2], [3], [4]],
          [[5], [6], [7], [8]]],
         [[[9], [10], [11], [12]],
          [[13], [14], [15], [16]]]]
    ```

    The output tensor has shape `[8, 1, 2, 1]` and value:

    ```prettyprint
    x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
         [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
    ```

    Among others, this operation is useful for reducing atrous convolution into regular convolution.

    -> Tensor Value t

    output

    SpaceToBatch for 4-D tensors of type T.

    This is a legacy version of the more general SpaceToBatchND.

    Zero-pads and then rearranges (permutes) blocks of spatial data into batch. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the batch dimension. After the zero-padding, both height and width of the input must be divisible by the block size.

    cTCGreedyDecoder Source

    Arguments

    :: Tensor v1 Float

    inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

    -> Tensor v2 Int32

    sequence_length: A vector containing sequence lengths, size `(batch_size)`.

    -> (Tensor Value Int64, Tensor Value Int64, Tensor Value Int64, Tensor Value Float)

    (decoded_indices, decoded_values, decoded_shape, log_probability)

    • decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
    • decoded_values: Values vector, size: `(total_decoded_outputs)`, of a `SparseTensor<int64, 2>`. The vector stores the decoded classes.
    • decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor. Values are: `[batch_size, max_decoded_length]`.
    • log_probability: Matrix, size `(batch_size x 1)`, containing sequence log-probabilities.

    Performs greedy decoding on the logits given in inputs.

    A note about the attribute merge_repeated: if enabled, when consecutive logits' maximum indices are the same, only the first of these is emitted. Labeling the blank *, the sequence "A B B * B B" becomes "A B" if merge_repeated = True and "A B B B B" if merge_repeated = False.

    Regardless of the value of merge_repeated, if the maximum index of a given time and batch corresponds to the blank, index `(num_classes - 1)`, no new element is emitted.

    batchToSpaceND Source

    Arguments

    :: (TensorType t, TensorType tblock_shape, OneOf `[Int32, Int64]` tblock_shape, TensorType tcrops, OneOf `[Int32, Int64]` tcrops) 
    => Tensor v1 t

    input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, where spatial_shape has M dimensions.

    -> Tensor v2 tblock_shape

    block_shape: 1-D with shape `[M]`, all values must be >= 1.

    -> Tensor v3 tcrops

    crops: 2-D with shape `[M, 2]`, all values must be >= 0. `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input dimension `i + 1`, which corresponds to spatial dimension i. It is required that `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.

    This operation is equivalent to the following steps:

    1. Reshape input to reshaped of shape:

           [block_shape[0], ..., block_shape[M-1],
            batch / prod(block_shape),
            input_shape[1], ..., input_shape[N-1]]

    2. Permute dimensions of reshaped to produce permuted of shape:

           [batch / prod(block_shape),
            input_shape[1], block_shape[0],
            ...,
            input_shape[M], block_shape[M-1],
            input_shape[M+1], ..., input_shape[N-1]]

    3. Reshape permuted to produce reshaped_permuted of shape:

           [batch / prod(block_shape),
            input_shape[1] * block_shape[0],
            ...,
            input_shape[M] * block_shape[M-1],
            input_shape[M+1], ..., input_shape[N-1]]

    4. Crop the start and end of dimensions `[1, ..., M]` of reshaped_permuted according to crops to produce the output of shape:

           [batch / prod(block_shape),
            input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
            ...,
            input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
            input_shape[M+1], ..., input_shape[N-1]]

    Some examples:

    1. For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:

    ```prettyprint
    [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
    ```

    The output tensor has shape `[1, 2, 2, 1]` and value:

    ```prettyprint
    x = [[[[1], [2]], [[3], [4]]]]
    ```

    2. For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:

    ```prettyprint
    [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
    ```

    The output tensor has shape `[1, 2, 2, 3]` and value:

    ```prettyprint
    x = [[[[1, 2, 3], [4, 5, 6]],
          [[7, 8, 9], [10, 11, 12]]]]
    ```

    3. For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:

    ```prettyprint
    x = [[[[1], [3]], [[9], [11]]],
         [[[2], [4]], [[10], [12]]],
         [[[5], [7]], [[13], [15]]],
         [[[6], [8]], [[14], [16]]]]
    ```

    The output tensor has shape `[1, 4, 4, 1]` and value:

    ```prettyprint
    x = [[[1], [2], [3], [4]],
         [[5], [6], [7], [8]],
         [[9], [10], [11], [12]],
         [[13], [14], [15], [16]]]
    ```

    4. For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:

    ```prettyprint
    x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
         [[[0], [2], [4]]], [[[0], [10], [12]]],
         [[[0], [5], [7]]], [[[0], [13], [15]]],
         [[[0], [6], [8]]], [[[0], [14], [16]]]]
    ```

    The output tensor has shape `[2, 2, 4, 1]` and value:

    ```prettyprint
    x = [[[[1], [2], [3], [4]],
          [[5], [6], [7], [8]]],
         [[[9], [10], [11], [12]],
          [[13], [14], [15], [16]]]]
    ```

    -> Tensor Value t

    output

    BatchToSpace for N-D tensors of type T.

    This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape `block_shape + [batch]`, interleaves these blocks back into the grid defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as the input. The spatial dimensions of this intermediate result are then optionally cropped according to crops to produce the output. This is the reverse of SpaceToBatch. See below for a precise description.

    pack Source

    Arguments

    :: TensorType t 
    => [Tensor v1 t]

    values: Must be of same shape and type.

    -> Tensor Value t

    output: The packed tensor.

    Packs a list of N rank-R tensors into one rank-`(R+1)` tensor.

    Packs the N tensors in values into a tensor with rank one higher than each tensor in values, by packing them along the axis dimension. Given a list of tensors of shape `(A, B, C)`;

    if `axis == 0` then the output tensor will have the shape `(N, A, B, C)`.
    if `axis == 1` then the output tensor will have the shape `(A, N, B, C)`.
    Etc.

    For example:

    ```prettyprint
    # x is [1, 4]
    # y is [2, 5]
    # z is [3, 6]
    pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
    pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
    ```

    This is the opposite of unpack.
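
    The prettyprint example above, restricted to the default `axis == 0`, as a Haskell sketch (TF.vector assumed from tensorflow-ops):

    ```haskell
    import qualified Data.Vector as V
    import qualified TensorFlow.GenOps.Core as CoreOps
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF

    main :: IO ()
    main = do
      -- pack([x, y, z]) along the first dimension yields shape [3, 2].
      result <- TF.runSession $ do
        let x = TF.vector [1, 4 :: Float]
            y = TF.vector [2, 5 :: Float]
            z = TF.vector [3, 6 :: Float]
        TF.run (CoreOps.pack [x, y, z])
      print (result :: V.Vector Float)  -- fromList [1.0,4.0,2.0,5.0,3.0,6.0]
    ```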

    oneHot Source

    Arguments

    :: (TensorType t, TensorType tI, OneOf `[Int32, Int64, Word8]` tI) 
    => Tensor v1 tI

    indices: A tensor of indices.

    -> Tensor v2 Int32

    depth: A scalar defining the depth of the one hot dimension.

    -> Tensor v3 t

    on_value: A scalar defining the value to fill in output when `indices[j] = i`.

    -> Tensor v4 t

    off_value: A scalar defining the value to fill in output when `indices[j] != i`.

    -> Tensor Value t

    output: The one-hot tensor.

    Returns a one-hot tensor.

    The locations represented by indices in indices take value on_value, while all other locations take value off_value.

    If the input indices is rank N, the output will have rank `N+1`. The new axis is created at dimension axis (default: the new axis is appended at the end).

    If indices is a scalar the output shape will be a vector of length depth.

    If indices is a vector of length features, the output shape will be:

    ```
    features x depth if axis == -1
    depth x features if axis == 0
    ```

    If indices is a matrix (batch) with shape `[batch, features]`, the output shape will be:

    ```
    batch x features x depth if axis == -1
    batch x depth x features if axis == 1
    depth x batch x features if axis == 0
    ```

    Examples
    ========

    Suppose that

    ```
    indices = [0, 2, -1, 1]
    depth = 3
    on_value = 5.0
    off_value = 0.0
    axis = -1
    ```

    Then output is `[4 x 3]`:

    ```
    output =
    [5.0 0.0 0.0]  // one_hot(0)
    [0.0 0.0 5.0]  // one_hot(2)
    [0.0 0.0 0.0]  // one_hot(-1)
    [0.0 5.0 0.0]  // one_hot(1)
    ```

    Suppose that

    ```
    indices = [0, 2, -1, 1]
    depth = 3
    on_value = 0.0
    off_value = 3.0
    axis = 0
    ```

    Then output is `[3 x 4]`:

    ```
    output =
    [0.0 3.0 3.0 3.0]
    [3.0 3.0 3.0 0.0]
    [3.0 3.0 3.0 3.0]
    [3.0 0.0 3.0 3.0]
    //  ^ one_hot(0)
    //      ^ one_hot(2)
    //          ^ one_hot(-1)
    //              ^ one_hot(1)
    ```

    Suppose that

    ```
    indices = [[0, 2], [1, -1]]
    depth = 3
    on_value = 1.0
    off_value = 0.0
    axis = -1
    ```

    Then output is `[2 x 2 x 3]`:

    ```
    output =
    [
      [1.0, 0.0, 0.0]  // one_hot(0)
      [0.0, 0.0, 1.0]  // one_hot(2)
    ][
      [0.0, 1.0, 0.0]  // one_hot(1)
      [0.0, 0.0, 0.0]  // one_hot(-1)
    ]
    ```
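
    The first example above as a Haskell sketch (TF.vector/TF.scalar assumed from tensorflow-ops):

    ```haskell
    import Data.Int (Int32)
    import qualified Data.Vector as V
    import qualified TensorFlow.GenOps.Core as CoreOps
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF

    main :: IO ()
    main = do
      -- indices = [0, 2, -1, 1], depth = 3, on_value = 5.0, off_value = 0.0.
      result <- TF.runSession $ do
        let indices = TF.vector [0, 2, -1, 1 :: Int32]
            depth   = TF.scalar (3 :: Int32)
        TF.run (CoreOps.oneHot indices depth (TF.scalar 5.0) (TF.scalar (0.0 :: Float)))
      print (result :: V.Vector Float)
      -- fromList [5.0,0.0,0.0, 0.0,0.0,5.0, 0.0,0.0,0.0, 0.0,5.0,0.0]
    ```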

    broadcastGradientArgs Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64]` t) 
    => Tensor v1 t

    s0

    -> Tensor v2 t

    s1

    -> (Tensor Value t, Tensor Value t)

    (r0, r1)

    • r0
    • r1

    Return the reduction indices for computing gradients of s0 op s1 with broadcast.

    This is typically used by gradient computations for a broadcasting operation.

    matrixSetDiag Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input: Rank `k+1`, where `k >= 1`.

    -> Tensor v2 t

    diagonal: Rank k, where `k >= 1`.

    -> Tensor Value t

    output: Rank `k+1`, with `output.shape = input.shape`.

    Returns a batched matrix tensor with new batched diagonal values.

    Given input and diagonal, this operation returns a tensor with the same shape and values as input, except for the diagonals of the innermost matrices. These will be overwritten by the values in diagonal. The batched matrices must be square.

    The output is computed as follows:

    Assume input has `k+1` dimensions `[I, J, K, ..., N, N]` and diagonal has k dimensions `[I, J, K, ..., N]`. Then the output is a tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:

    • `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
    • `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
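
    A small Haskell sketch of this rule (constant/vector helpers assumed from tensorflow-ops): overwriting the diagonal of a 2x2 matrix.

    ```haskell
    import qualified Data.Vector as V
    import TensorFlow.Types (Shape (..))
    import qualified TensorFlow.GenOps.Core as CoreOps
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF

    main :: IO ()
    main = do
      -- input = [[1, 2], [3, 4]], diagonal = [9, 8] ==> [[9, 2], [3, 8]].
      result <- TF.runSession $ do
        let input    = TF.constant (Shape [2, 2]) [1, 2, 3, 4 :: Float]
            diagonal = TF.vector [9, 8 :: Float]
        TF.run (CoreOps.matrixSetDiag input diagonal)
      print (result :: V.Vector Float)  -- fromList [9.0,2.0,3.0,8.0]
    ```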

    applyRMSProp Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    ms: Should be from a Variable().

    -> Tensor v3 t

    mom: Should be from a Variable().

    -> Tensor v4 t

    lr: Scaling factor. Must be a scalar.

    -> Tensor v5 t

    rho: Decay rate. Must be a scalar.

    -> Tensor v6 t

    momentum

    -> Tensor v7 t

    epsilon: Ridge term. Must be a scalar.

    -> Tensor v8 t

    grad: The gradient.

    -> Tensor Value t

    out: Same as "var".

    Update '*var' according to the RMSProp algorithm.

    Note that in the dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.

    mean_square = decay * mean_square + (1-decay) * gradient ** 2
    Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

    ms <- rho * ms_{t-1} + (1-rho) * grad * grad
    mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
    var <- var - mom

    const Source

    Arguments

    :: TensorType dtype 
    => Tensor Value dtype

    output

    Returns a constant tensor.

    enter Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    data: The tensor to be made available to the child frame.

    -> Tensor Value t

    output: The same tensor as `data`.

    Creates or finds a child frame, and makes `data` available to the child frame.

    This op is used together with Exit to create loops in the graph. The unique frame_name is used by the Executor to identify frames. If is_constant is true, output is a constant in the child frame; otherwise it may be changed in the child frame. At most parallel_iterations iterations are run in parallel in the child frame.

    debugIdentity Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input: Input tensor, non-Reference type.

    -> Tensor Value t

    output: Output tensor that equals the input tensor.

    Debug Identity Op.

    Provides an identity mapping of the non-Ref type input tensor for debugging.

    debugNanCount Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input: Input tensor, non-Reference type.

    -> Tensor Value Int64

    output: An integer output tensor that is the number of NaNs in the input.

    Debug NaN Value Counter Op

    Counts number of NaNs in the input tensor, for debugging.

    batchNormWithGlobalNormalization Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Bool

    scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma.

    -> Float

    variance_epsilon: A small float number to avoid dividing by 0.

    -> Tensor v1 t

    t: A 4D input Tensor.

    -> Tensor v2 t

    m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.

    -> Tensor v3 t

    v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.

    -> Tensor v4 t

    beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor.

    -> Tensor v5 t

    gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor.

    -> Tensor Value t

    result

    Batch normalization.

    This op is deprecated. Prefer `tf.nn.batch_normalization`.

    batchMatrixDiag Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    diagonal

    -> Tensor Value t

    output

    unpack Source

    Arguments

    :: TensorType t 
    => Int64

    num

    -> Tensor v1 t

    value: 1-D or higher, with axis dimension size equal to num.

    -> [Tensor Value t]

    output: The list of tensors unpacked from value.

    Unpacks a given dimension of a rank-R tensor into num rank-`(R-1)` tensors.

    Unpacks num tensors from value by chipping it along the axis dimension. For example, given a tensor of shape `(A, B, C, D)`;

    If `axis == 0` then the i'th tensor in output is the slice `value[i, :, :, :]` and each tensor in output will have shape `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike split).

    If `axis == 1` then the i'th tensor in output is the slice `value[:, i, :, :]` and each tensor in output will have shape `(A, C, D)`. Etc.

    This is the opposite of pack.

    sparseSplit Source

    Arguments

    :: TensorType t 
    => Int64

    num_split: The number of ways to split.

    -> Tensor v1 Int64

    split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(shape))`.

    -> Tensor v2 Int64

    indices: 2-D tensor represents the indices of the sparse tensor.

    -> Tensor v3 t

    values: 1-D tensor represents the values of the sparse tensor.

    -> Tensor v4 Int64

    shape: 1-D tensor representing the shape of the sparse tensor. output indices: A list of 1-D tensors representing the indices of the output sparse tensors.

    -> ([Tensor Value Int64], [Tensor Value t], [Tensor Value Int64])

    (output_indices, output_values, output_shape)

    • output_indices
    • output_values: A list of 1-D tensors representing the values of the output sparse tensors.
    • output_shape: A list of 1-D tensors representing the shape of the output sparse tensors.

    Split a SparseTensor into num_split tensors along one dimension.

    If `shape[split_dim]` is not an integer multiple of num_split, slices `[0 : shape[split_dim] % num_split]` get one extra dimension. For example, if `split_dim = 1` and `num_split = 2` and the input is

        input_tensor = shape = [2, 7]
        [    a   d e  ]
        [b c          ]

    Graphically the output tensors are:

        output_tensor[0] = shape = [2, 4]
        [    a  ]
        [b c    ]

        output_tensor[1] = shape = [2, 3]
        [ d e  ]
        [      ]

    mirrorPad Source

    Arguments

    :: (TensorType t, TensorType tpaddings, OneOf `[Int32, Int64]` tpaddings) 
    => Tensor v1 t

    input: The input tensor to be padded.

    -> Tensor v2 tpaddings

    paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

    -> Tensor Value t

    output: The padded tensor.

    Pads a tensor with mirrored values.

    This operation pads an input with mirrored values according to the paddings you specify. paddings is an integer tensor with shape `[n, 2]`, where n is the rank of input. For each dimension D of input, `paddings[D, 0]` indicates how many values to add before the contents of input in that dimension, and `paddings[D, 1]` indicates how many values to add after the contents of input in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if copy_border is true (if false, respectively).

    The padded size of each dimension D of the output is:

    `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

    For example:

    ```prettyprint
    # t is [[1, 2, 3], [4, 5, 6]].
    # paddings is [[1, 1], [2, 2]].
    # mode is SYMMETRIC.
    # rank of t is 2.
    pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
                          [2, 1, 1, 2, 3, 3, 2]
                          [5, 4, 4, 5, 6, 6, 5]
                          [5, 4, 4, 5, 6, 6, 5]]
    ```

    batchMatrixDiagPart Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input

    -> Tensor Value t

    diagonal

    fractionalMaxPoolGrad Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
    => Tensor v1 t

    orig_input: Original input for fractional_max_pool

    -> Tensor v2 t

    orig_output: Original output for fractional_max_pool

    -> Tensor v3 t

    out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of fractional_max_pool.

    -> Tensor v4 Int64

    row_pooling_sequence: row pooling sequence, form pooling region with col_pooling_sequence.

    -> Tensor v5 Int64

    col_pooling_sequence: column pooling sequence, form pooling region with row_pooling_sequence.

    -> Tensor Value t

    output: 4-D. Gradients w.r.t. the input of fractional_max_pool.

    Computes gradient of the FractionalMaxPool function.

    matchingFiles Source

    Arguments

    :: Tensor v1 ByteString

    pattern: A (scalar) shell wildcard pattern.

    -> Tensor Value ByteString

    filenames: A vector of matching filenames.

    Returns the set of files matching a pattern.

    Note that this routine only supports wildcard characters in the basename portion of the pattern, not in the directory portion.

    tile Source

    Arguments

    :: (TensorType t, TensorType tmultiples, OneOf `[Int32, Int64]` tmultiples) 
    => Tensor v1 t

    input: 1-D or higher.

    -> Tensor v2 tmultiples

    multiples: 1-D. Length must be the same as the number of dimensions in input.

    -> Tensor Value t

    output

    Constructs a tensor by tiling a given tensor.

    This operation creates a new tensor by replicating input multiples times. The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, and the values of input are replicated `multiples[i]` times along the ith dimension. For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
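
    The `[a b c d]` example as a Haskell sketch (TF.vector assumed from tensorflow-ops):

    ```haskell
    import Data.Int (Int32)
    import qualified Data.Vector as V
    import qualified TensorFlow.GenOps.Core as CoreOps
    import qualified TensorFlow.Ops as TF
    import qualified TensorFlow.Session as TF

    main :: IO ()
    main = do
      -- Tiling a length-4 vector by [2] repeats it twice along dimension 0.
      result <- TF.runSession $ do
        let input     = TF.vector [1, 2, 3, 4 :: Float]
            multiples = TF.vector [2 :: Int32]
        TF.run (CoreOps.tile input multiples)
      print (result :: V.Vector Float)  -- fromList [1.0,2.0,3.0,4.0,1.0,2.0,3.0,4.0]
    ```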

    sparseSparseMinimum Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Int64

    a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

    -> Tensor v2 t

    a_values: 1-D. N non-empty values corresponding to a_indices.

    -> Tensor v3 Int64

    a_shape: 1-D. Shape of the input SparseTensor.

    -> Tensor v4 Int64

    b_indices: counterpart to a_indices for the other operand.

    -> Tensor v5 t

    b_values: counterpart to a_values for the other operand; must be of the same dtype.

    -> Tensor v6 Int64

    b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

    -> (Tensor Value Int64, Tensor Value t)

    (output_indices, output_values)

    • output_indices: 2-D. The indices of the output SparseTensor.
    • output_values: 1-D. The values of the output SparseTensor.

    Returns the element-wise min of two SparseTensors.

    Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

    allCandidateSampler Source

    Arguments

    :: Int64

    num_sampled: Number of candidates to produce per batch.

    -> Int64

    num_true: Number of true labels per context.

    -> Bool

    unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

    -> Tensor v1 Int64

    true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

    -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

    (sampled_candidates, true_expected_count, sampled_expected_count)

    • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
    • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
    • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

    Generates labels for candidate sampling with a learned unigram distribution.

    See explanations of candidate sampling and the data formats at go/candidate-sampling.

    For each batch, this op picks a single set of sampled candidate labels.

    The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

    refSwitch Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    data: The ref tensor to be forwarded to the appropriate output.

    -> Tensor v2 Bool

    pred: A scalar that specifies which output port will receive data.

    -> (Tensor Value t, Tensor Value t)

    (output_false, output_true)

    • output_false: If pred is false, data will be forwarded to this output.
    • output_true: If pred is true, data will be forwarded to this output.

    Forwards the ref tensor `data` to the output port determined by pred.

    If pred is true, the `data` input is forwarded to output_true. Otherwise, the data goes to output_false.

    See also Switch and Merge.

    mergeSummary Source

    Arguments

    :: [Tensor v1 ByteString]

    inputs: Can be of any shape. Each must contain serialized Summary protocol buffers.

    -> Tensor Value ByteString

    summary: Scalar. Serialized Summary protocol buffer.

    Merges summaries.

    This op creates a `Summary` protocol buffer that contains the union of all the values in the input summaries.

    When the Op is run, it reports an InvalidArgument error if multiple values in the summaries to merge use the same tag.

    logicalNot Source

    Arguments

    :: Tensor v1 Bool

    x

    -> Tensor Value Bool

    y

    Returns the truth value of NOT x element-wise.

    lRNGrad Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Float]` t) 
    => Tensor v1 t

    input_grads: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor v2 t

    input_image: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor v3 t

    output_image: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor Value t

    output: The gradients for LRN.

    Gradients for Local Response Normalization.

    stringToNumber Source

    Arguments

    :: (TensorType out_type, OneOf `[Int32, Float]` out_type) 
    => Tensor v1 ByteString

    string_tensor

    -> Tensor Value out_type

    output: A Tensor of the same shape as the input string_tensor.

    Converts each string in the input Tensor to the specified numeric type.

    (Note that int32 overflow results in an error while float overflow results in a rounded value.)

    sparseMatMul Source

    Arguments

    :: (TensorType ta, OneOf `[Word16, Float]` ta, TensorType tb, OneOf `[Word16, Float]` tb) 
    => Tensor v1 ta

    a

    -> Tensor v2 tb

    b

    -> Tensor Value Float

    product

    Multiply matrix "a" by matrix "b".

    The inputs must be two-dimensional matrices and the inner dimension of "a" must match the outer dimension of "b". This op is optimized for the case where at least one of "a" or "b" is sparse. The breakeven for using this versus a dense matrix multiply on one platform was 30% zero values in the sparse matrix.

    merge Source

    Arguments

    :: TensorType t 
    => [Tensor v1 t]

    inputs: The input tensors, exactly one of which will become available.

    -> (Tensor Value t, Tensor Value Int32)

    (output, value_index)

    • output: Will be set to the available input tensor.
    • value_index: The index of the chosen input tensor in inputs.

    Forwards the value of an available tensor from inputs to output.

    Merge waits for at least one of the tensors in inputs to become available. It is usually combined with Switch to implement branching.

    Merge forwards the first tensor to become available to output, and sets value_index to its index in inputs.

    choleskyGrad Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor.

    -> Tensor v2 t

    grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor.

    -> Tensor Value t

    output: Symmetrized version of df/dA. Shape is `[..., M, M]`.

    Computes the reverse mode backpropagated gradient of the Cholesky algorithm.

    For an explanation see "Differentiation of the Cholesky algorithm" by Iain Murray http://arxiv.org/abs/1602.07527.

    batchCholeskyGrad Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    l

    -> Tensor v2 t

    grad

    -> Tensor Value t

    output

    tensorArrayGather Source

    Arguments

    :: TensorType dtype 
    => Tensor v1 ByteString

    handle: The handle to a TensorArray.

    -> Tensor v2 Int32

    indices: The locations in the TensorArray from which to read tensor elements.

    -> Tensor v3 Float

    flow_in: A float scalar that enforces proper chaining of operations.

    -> Tensor Value dtype

    value: All of the elements in the TensorArray, concatenated along a new axis (the new dimension 0).

    Gather specific elements from the TensorArray into output value.

    All elements selected by indices must have the same shape.

    resizeNearestNeighbor Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    images: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor v2 Int32

    size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

    -> Tensor Value t

    resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

    Resize images to size using nearest neighbor interpolation.

    negTrain Source

    Arguments

    :: Int64

    num_negative_samples: Number of negative samples per example.

    -> Tensor v1 Float

    w_in: input word embedding.

    -> Tensor v2 Float

    w_out: output word embedding.

    -> Tensor v3 Int32

    examples: A vector of word ids.

    -> Tensor v4 Int32

    labels: A vector of word ids.

    -> Tensor v5 Float

    lr

    -> ControlNode 

    Training via negative sampling.

    tensorArrayGrad Source

    Arguments

    :: Tensor v1 ByteString

    handle: The handle to the forward TensorArray.

    -> Tensor v2 Float

    flow_in: A float scalar that enforces proper chaining of operations.

    -> Tensor Value ByteString

    grad_handle

    Creates a TensorArray for storing the gradients of values in the given handle.

    If the given TensorArray gradient already exists, returns a reference to it.

    Locks the size of the original TensorArray by disabling its dynamic size flag.

**A note about the input flow_in:**

The handle flow_in forces the execution of the gradient lookup to occur only after certain other operations have occurred. For example, when the forward TensorArray is dynamically sized, writes to this TensorArray may resize the object. The gradient TensorArray is statically sized based on the size of the forward TensorArray when this operation executes. Furthermore, the size of the forward TensorArray is frozen by this call. As a result, the flow is used to ensure that the call to generate the gradient TensorArray only happens after all writes are executed.

In the case of dynamically sized TensorArrays, gradient computation should only be performed on read operations that have themselves been chained via flow to occur only after all writes have executed. That way the final size of the forward TensorArray is known when this operation is called.

**A note about the source attribute:**

TensorArray gradient calls use an accumulator TensorArray object. If multiple gradients are calculated and run in the same session, the multiple gradient nodes may accidentally flow through the same accumulator TensorArray. This double counts and generally breaks the TensorArray gradient flow.

The solution is to identify which gradient call this particular TensorArray gradient is being called in. This is performed by identifying a unique string (e.g. "gradients", "gradients_1", ...) from the input gradient Tensor's name. This string is used as a suffix when creating the TensorArray gradient object here (the attribute source).

The attribute source is added as a suffix to the forward TensorArray's name when performing the creation / lookup, so that each separate gradient calculation gets its own TensorArray accumulator.

    audioSummary Source

    Arguments

    :: Float

    sample_rate: The sample rate of the signal in hertz.

    -> Tensor v1 ByteString

    tag: Scalar. Used to build the tag attribute of the summary values.

    -> Tensor v2 Float

    tensor: 2-D of shape `[batch_size, frames]`.

    -> Tensor Value ByteString

    summary: Scalar. Serialized Summary protocol buffer.

    Outputs a Summary protocol buffer with audio.

The summary has up to max_outputs summary values containing audio. The audio is built from tensor which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of sample_rate.

The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:

• If max_outputs is 1, the summary value tag is '*tag*/audio'.
• If max_outputs is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

    noOp :: ControlNode Source

    Does nothing. Only useful as a placeholder for control edges.

    nextIteration Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    data: The tensor to be made available to the next iteration.

    -> Tensor Value t

    output: The same tensor as `data`.

    Makes its input available to the next iteration.

    softplusGrad Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    gradients: The backpropagated gradients to the corresponding softplus operation.

    -> Tensor v2 t

    features: The features passed as input to the corresponding softplus operation.

    -> Tensor Value t

    backprops: The gradients: `gradients / (1 + exp(-features))`.

    Computes softplus gradients for a softplus operation.

    svd Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Double, Float]` t) 
    => Tensor v1 t

input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions form matrices of size `[M, N]`. Let P be the minimum of M and N.

    -> (Tensor Value t, Tensor Value t, Tensor Value t)

    (s, u, v)

    • s: Singular values. Shape is `[..., P]`.
• u: Left singular vectors. If full_matrices is False then shape is `[..., M, M]`; if full_matrices is True then shape is `[..., M, P]`. Undefined if compute_uv is False.
• v: Right singular vectors. If full_matrices is False then shape is `[..., N, N]`. If full_matrices is True then shape is `[..., N, P]`. Undefined if compute_uv is False.

    Computes the singular value decompositions of one or more matrices.

Computes the SVD of each inner matrix in input such that `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`

```prettyprint
# a is a tensor containing a batch of matrices.
# s is a tensor of singular values for each matrix.
# u is the tensor containing the left singular vectors for each matrix.
# v is the tensor containing the right singular vectors for each matrix.
s, u, v = svd(a)
s, _, _ = svd(a, compute_uv=False)
```

    hSVToRGB Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.

    -> Tensor Value t

    output: images converted to RGB.

    Convert one or more images from HSV to RGB.

Outputs a tensor of the same shape as the images tensor, containing the RGB value of the pixels. The output is only well defined if the values in images are in `[0,1]`.

    See rgb_to_hsv for a description of the HSV encoding.

    parameterizedTruncatedNormal Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64]` t, TensorType dtype, OneOf `[Word16, Double, Float]` dtype) 
    => Tensor v1 t

    shape: The shape of the output tensor. Batches are indexed by the 0th dimension.

    -> Tensor v2 dtype

    means: The mean parameter of each batch.

    -> Tensor v3 dtype

    stdevs: The standard deviation parameter of each batch. Must be greater than 0.

    -> Tensor v4 dtype

    minvals: The minimum cutoff. May be -infinity.

    -> Tensor v5 dtype

maxvals: The maximum cutoff. May be +infinity, and must be more than the minval for each batch.

-> Tensor Value dtype

output: A matrix of shape num_batches x samples_per_batch, filled with random truncated normal values using the parameters for each row.

Outputs random values from a normal distribution. The parameters may each be a scalar which applies to the entire output, or a vector of length shape[0] which stores the parameters for each batch.

    square Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes square of x element-wise.

    I.e., \(y = x * x = x^2\).

    elu Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    features

    -> Tensor Value t

    activations

    Computes exponential linear: `exp(features) - 1` if < 0, features otherwise.

    See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
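
To make the piecewise definition concrete, here is a minimal pure Haskell sketch of the same formula (illustration only, not this package's API):

```haskell
-- ELU per the description above: exp(x) - 1 for negative x, identity otherwise.
elu :: Double -> Double
elu x = if x < 0 then exp x - 1 else x
```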

    lookupTableExport Source

    Arguments

    :: (TensorType tkeys, TensorType tvalues) 
    => Tensor v1 ByteString

    table_handle: Handle to the table.

    -> (Tensor Value tkeys, Tensor Value tvalues)

    (keys, values)

    • keys: Vector of all keys present in the table.
    • values: Tensor of all values in the table. Indexed in parallel with keys.

    Outputs all keys and values in the table.

    lookupTableSize Source

    Arguments

    :: Tensor v1 ByteString

    table_handle: Handle to the table.

    -> Tensor Value Int64

    size: Scalar that contains number of elements in the table.

    Computes the number of elements in the given table.

    avgPoolGrad Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 Int32

    orig_input_shape: 1-D. Shape of the original input to avg_pool.

    -> Tensor v2 t

grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of avg_pool.

    -> Tensor Value t

    output: 4-D. Gradients w.r.t. the input of avg_pool.

    Computes gradients of the average pooling function.

    computeAccidentalHits Source

    Arguments

    :: Int64

    num_true: Number of true labels per context.

    -> Tensor v1 Int64

    true_classes: The true_classes output of UnpackSparseLabels.

    -> Tensor v2 Int64

    sampled_candidates: The sampled_candidates output of CandidateSampler.

    -> (Tensor Value Int32, Tensor Value Int64, Tensor Value Float)

    (indices, ids, weights)

    • indices: A vector of indices corresponding to rows of true_candidates.
• ids: A vector of IDs of positions in sampled_candidates that match a true_label for the row with the corresponding index in indices.
• weights: A vector of the same length as indices and ids, in which each element is -FLOAT_MAX.

    Computes the ids of the positions in sampled_candidates that match true_labels.

When doing log-odds NCE, the result of this op should be passed through a SparseToDense op, then added to the logits of the sampled candidates. This has the effect of removing the sampled labels that match the true labels by making the classifier sure that they are sampled labels.

    cTCLoss Source

    Arguments

    :: Tensor v1 Float

    inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

    -> Tensor v2 Int64

labels_indices: The indices of a `SparseTensor<int32, 2>`. `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for `(batch b, time t)`.

    -> Tensor v3 Int32

    labels_values: The values (labels) associated with the given batch and time.

    -> Tensor v4 Int32

    sequence_length: A vector containing sequence lengths (batch).

    -> (Tensor Value Float, Tensor Value Float)

    (loss, gradient)

    • loss: A vector (batch) containing log-probabilities.
• gradient: The gradient of loss. 3-D, shape: `(max_time x batch_size x num_classes)`.

Calculates the CTC Loss (log probability) for each batch entry. Also calculates the gradient. This class performs the softmax operation for you, so inputs should be e.g. linear projections of outputs by an LSTM.

    avgPool3D Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

    -> Tensor Value t

    output: The average pooled output tensor.

    Performs 3D average pooling on the input.

    inv Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes the reciprocal of x element-wise.

    I.e., \(y = 1 / x\).

    stackPop Source

    Arguments

    :: TensorType elem_type 
    => Tensor v1 ByteString

    handle: The handle to a stack.

    -> Tensor Value elem_type

    elem: The tensor that is popped from the top of the stack.

    Pop the element at the top of the stack.

    paddingFIFOQueue Source

    Arguments

    :: Tensor Value ByteString

    handle: The handle to the queue.

    A queue that produces elements in first-in first-out order.

Variable-size shapes are allowed by setting the corresponding shape dimensions to 0 in the shape attr. In this case DequeueMany will pad up to the maximum size of any given element in the minibatch. See below for details.

    batchSelfAdjointEigV2 Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input

    -> (Tensor Value t, Tensor Value t)

    (e, v)

    • e
    • v

    batchMatrixTriangularSolve Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    matrix

    -> Tensor v2 t

    rhs

    -> Tensor Value t

    output

    batchMatrixSolveLs Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    matrix

    -> Tensor v2 t

    rhs

    -> Tensor v3 Double

    l2_regularizer

    -> Tensor Value t

    output

    batchSvd Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Double, Float]` t) 
    => Tensor v1 t

    input

    -> (Tensor Value t, Tensor Value t, Tensor Value t)

    (s, u, v)

    • s
    • u
    • v

    tensorSummary Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    tensor: A tensor to serialize.

    -> Tensor Value ByteString

    summary

    Outputs a Summary protocol buffer with a tensor.

    sparseSoftmaxCrossEntropyWithLogits Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t, TensorType tlabels, OneOf `[Int32, Int64]` tlabels) 
    => Tensor v1 t

    features: batch_size x num_classes matrix

    -> Tensor v2 tlabels

labels: batch_size vector with values in [0, num_classes). This is the label for the given minibatch entry.

    -> (Tensor Value t, Tensor Value t)

    (loss, backprop)

    • loss: Per example loss (batch_size vector).
    • backprop: backpropagated gradients (batch_size x num_classes matrix).

    Computes softmax cross entropy cost and gradients to backpropagate.

Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept a matrix of label probabilities, but rather a single label per row of features. This label is considered to have probability 1.0 for the given row.

    Inputs are the logits, not probabilities.

    maxPoolWithArgmax Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Float]` t, TensorType targmax, OneOf `[Int32, Int64]` targmax) 
    => Tensor v1 t

    input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.

    -> (Tensor Value t, Tensor Value targmax)

    (output, argmax)

    • output: The max pooled output tensor.
    • argmax: 4-D. The flattened indices of the max values chosen for each output.

    Performs max pooling on the input and outputs both max values and indices.

The indices in argmax are flattened, so that a maximum value at position `[b, y, x, c]` becomes flattened index `((b * height + y) * width + x) * channels + c`.
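
The flattening rule can be written directly as a small pure Haskell helper (an illustrative sketch, not part of this API):

```haskell
-- Flattened argmax index for a max value at position (b, y, x, c) in an
-- input of the given height, width and channel count, per the formula above.
flattenedArgmax :: Int -> Int -> Int -> (Int, Int, Int, Int) -> Int
flattenedArgmax height width channels (b, y, x, c) =
  ((b * height + y) * width + x) * channels + c
```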

    fFT Source

    Arguments

    :: Tensor v1 (Complex Float)

    input: A complex64 tensor.

    -> Tensor Value (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most dimension of input is replaced with its 1D Fourier Transform.

Compute the 1-dimensional discrete Fourier Transform over the inner-most dimension of input.

    histogramSummary Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 ByteString

    tag: Scalar. Tag to use for the Value.

    -> Tensor v2 t

    values: Any shape. Values to use to build the histogram.

    -> Tensor Value ByteString

    summary: Scalar. Serialized Summary protocol buffer.

    Outputs a Summary protocol buffer with a histogram.

The generated `Summary` has one summary value containing a histogram for values.

    This op reports an InvalidArgument error if any value is not finite.

    pad Source

    Arguments

    :: (TensorType t, TensorType tpaddings, OneOf `[Int32, Int64]` tpaddings) 
    => Tensor v1 t

    input

    -> Tensor v2 tpaddings

    paddings

    -> Tensor Value t

    output

    Pads a tensor with zeros.

This operation pads the input with zeros according to the paddings you specify. paddings is an integer tensor with shape `[Dn, 2]`, where n is the rank of input. For each dimension D of input, `paddings[D, 0]` indicates how many zeros to add before the contents of input in that dimension, and `paddings[D, 1]` indicates how many zeros to add after the contents of input in that dimension.

    The padded size of each dimension D of the output is:

    `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

    For example:

```prettyprint
# t is [[1, 1], [2, 2]]
# paddings is [[1, 1], [2, 2]]
# rank of t is 2
pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                      [0, 0, 1, 1, 0, 0]
                      [0, 0, 2, 2, 0, 0]
                      [0, 0, 0, 0, 0, 0]]
```
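
The size arithmetic above can also be sketched in pure Haskell (a hypothetical helper for illustration, not part of this API):

```haskell
-- Output shape of a zero-pad, given input dimensions and per-dimension
-- (before, after) padding counts, per
-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`.
paddedShape :: [Int] -> [(Int, Int)] -> [Int]
paddedShape dims paddings =
  [ before + d + after | (d, (before, after)) <- zip dims paddings ]
```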

    batchIFFT3D Source

    Arguments

    :: Tensor v1 (Complex Float)

    input

    -> Tensor Value (Complex Float)

    output

    imageSummary Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Word8, Float]` t) 
    => Tensor v1 ByteString

    tag: Scalar. Used to build the tag attribute of the summary values.

    -> Tensor v2 t

tensor: 4-D of shape `[batch_size, height, width, channels]` where channels is 1, 3, or 4.

    -> Tensor Value ByteString

    summary: Scalar. Serialized Summary protocol buffer.

    Outputs a Summary protocol buffer with images.

The summary has up to max_images summary values containing images. The images are built from tensor which must be 4-D with shape `[batch_size, height, width, channels]` and where channels can be:

    • 1: tensor is interpreted as Grayscale.
    • 3: tensor is interpreted as RGB.
    • 4: tensor is interpreted as RGBA.

The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range `[0, 255]`. uint8 values are unchanged. The op uses two different normalization algorithms:

• If the input values are all positive, they are rescaled so the largest one is 255.
• If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255.

The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:

• If max_images is 1, the summary value tag is '*tag*/image'.
• If max_images is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.

The bad_color argument is the color to use in the generated images for non-finite input values. It is a uint8 1-D tensor of length channels. Each element must be in the range `[0, 255]` (it represents the value of a pixel in the output image). Non-finite values in the input tensor are replaced by this tensor in the output image. The default value is the color red.

    segmentSum Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    data

    -> Tensor v2 tindices

segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Value t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

    Computes the sum along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = sum_j data_j\) where sum is over j such that `segment_ids[j] == i`.

    style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/SegmentSum.png" alt + /div

    encodeJpeg Source

    Arguments

    :: Tensor v1 Word8

    image: 3-D with shape `[height, width, channels]`.

    -> Tensor Value ByteString

    contents: 0-D. JPEG-encoded image.

    JPEG-encode an image.

    image is a 3-D uint8 Tensor of shape `[height, width, channels]`.

The attr format can be used to override the color format of the encoded output. Values can be:

• `''`: Use a default format based on the number of channels in the image.
• grayscale: Output a grayscale JPEG image. The channels dimension of image must be 1.
• rgb: Output an RGB JPEG image. The channels dimension of image must be 3.

If format is not specified or is the empty string, a default format is picked in function of the number of channels in image:

    • 1: Output a grayscale image.
    • 3: Output an RGB image.

    batchNormWithGlobalNormalizationGrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Bool

scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma.

    -> Float

    variance_epsilon: A small float number to avoid dividing by 0.

    -> Tensor v1 t

    t: A 4D input Tensor.

    -> Tensor v2 t

m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v3 t

v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v4 t

gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied with the normalized Tensor.

    -> Tensor v5 t

    backprop: 4D backprop Tensor.

    -> (Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)

    (dx, dm, dv, db, dg)

    • dx: 4D backprop tensor for input.
    • dm: 1D backprop tensor for mean.
    • dv: 1D backprop tensor for variance.
    • db: 1D backprop tensor for beta.
    • dg: 1D backprop tensor for gamma.

    Gradients for batch normalization.

    This op is deprecated. See `tf.nn.batch_normalization`.

    biasAddV1 Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    value: Any number of dimensions.

    -> Tensor v2 t

    bias: 1-D with size the last dimension of value.

    -> Tensor Value t

    output: Broadcasted sum of value and bias.

    Adds bias to value.

    This is a deprecated version of BiasAdd and will be soon removed.

This is a special case of `tf.add` where bias is restricted to be 1-D. Broadcasting is supported, so value may have any number of dimensions.

    invertPermutation Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64]` t) 
    => Tensor v1 t

    x: 1-D.

    -> Tensor Value t

    y: 1-D.

    Computes the inverse permutation of a tensor.

This operation computes the inverse of an index permutation. It takes a 1-D integer tensor x, which represents the indices of a zero-based array, and swaps each value with its index position. In other words, for an output tensor y and an input tensor x, this operation computes the following:

    `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`

    The values must include 0. There can be no duplicate values or negative values.

    For example:

```prettyprint
# tensor x is [3, 4, 0, 2, 1]
invert_permutation(x) ==> [2, 4, 3, 0, 1]
```
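
The rule `y[x[i]] = i` has a direct pure Haskell analogue (illustrative sketch, not this package's API):

```haskell
import Data.List (sortOn)

-- invert [3, 4, 0, 2, 1] == [2, 4, 3, 0, 1], matching the example above:
-- pair each value with its index, sort by value, and read off the indices.
invert :: [Int] -> [Int]
invert xs = map snd (sortOn fst (zip xs [0 ..]))
```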

    mirrorPadGrad Source

    Arguments

    :: (TensorType t, TensorType tpaddings, OneOf `[Int32, Int64]` tpaddings) 
    => Tensor v1 t

    input: The input tensor to be folded.

    -> Tensor v2 tpaddings

paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

    -> Tensor Value t

    output: The folded tensor.

    Gradient op for MirrorPad op. This op folds a mirror-padded tensor.

This operation folds the padded areas of input by MirrorPad according to the paddings you specify. paddings must be the same as paddings argument given to the corresponding MirrorPad op.

    The folded size of each dimension D of the output is:

    `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`

    For example:

```prettyprint
# t is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
# paddings is [[0, 1], [0, 1]].
# mode is SYMMETRIC.
# rank of t is 2.
pad(t, paddings) ==> [[ 1, 5]
                      [11, 28]]
```

    reverse Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    tensor: Up to 8-D.

    -> Tensor v2 Bool

    dims: 1-D. The dimensions to reverse.

    -> Tensor Value t

    output: The same shape as tensor.

    Reverses specific dimensions of a tensor.

Given a tensor, and a bool tensor dims representing the dimensions of tensor, this operation reverses each dimension i of tensor where `dims[i]` is True.

tensor can have up to 8 dimensions. The number of dimensions of tensor must equal the number of elements in dims. In other words:

    `rank(tensor) = size(dims)`

    For example:

```prettyprint
# tensor t is [[[[ 0, 1, 2, 3],
#                [ 4, 5, 6, 7],
#                [ 8, 9, 10, 11]],
#               [[12, 13, 14, 15],
#                [16, 17, 18, 19],
#                [20, 21, 22, 23]]]]
# tensor t shape is [1, 2, 3, 4]

# dims is [False, False, False, True]
reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
                        [ 7, 6, 5, 4],
                        [11, 10, 9, 8]],
                       [[15, 14, 13, 12],
                        [19, 18, 17, 16],
                        [23, 22, 21, 20]]]]

# dims is [False, True, False, False]
reverse(t, dims) ==> [[[[12, 13, 14, 15],
                        [16, 17, 18, 19],
                        [20, 21, 22, 23]],
                       [[ 0, 1, 2, 3],
                        [ 4, 5, 6, 7],
                        [ 8, 9, 10, 11]]]]

# dims is [False, False, True, False]
reverse(t, dims) ==> [[[[ 8, 9, 10, 11],
                        [ 4, 5, 6, 7],
                        [ 0, 1, 2, 3]],
                       [[20, 21, 22, 23],
                        [16, 17, 18, 19],
                        [12, 13, 14, 15]]]]
```

    conv2D Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    input

    -> Tensor v2 t

    filter

    -> Tensor Value t

    output

    Computes a 2-D convolution given 4-D input and filter tensors.

Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following:

1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch vector.

    In detail, with the default NHWC format,

output[b, i, j, k] =
    sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
                    filter[di, dj, q, k]

Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
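
For concreteness, the per-element sum above can be transcribed into pure Haskell over Data.Array values (a sketch assuming zero-based array bounds and in-range indices, not the actual kernel):

```haskell
import Data.Array (Array, bounds, (!))

-- output[b, i, j, k] for NHWC input inp and HWIO filter flt, with spatial
-- strides (sH, sW) = (strides[1], strides[2]); assumes all lower bounds are 0.
conv2dAt :: Array (Int, Int, Int, Int) Float
         -> Array (Int, Int, Int, Int) Float
         -> (Int, Int)
         -> (Int, Int, Int, Int)
         -> Float
conv2dAt inp flt (sH, sW) (b, i, j, k) =
  sum [ inp ! (b, sH * i + di, sW * j + dj, q) * flt ! (di, dj, q, k)
      | di <- [0 .. fH - 1], dj <- [0 .. fW - 1], q <- [0 .. nC - 1] ]
  where
    (_, (fH', fW', nC', _)) = bounds flt
    (fH, fW, nC) = (fH' + 1, fW' + 1, nC' + 1)
```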

    conv2DBackpropInput Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 Int32

input_sizes: An integer vector representing the shape of input, where input is a 4-D `[batch, height, width, channels]` tensor.

-> Tensor v2 t

filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

-> Tensor v3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

-> Tensor Value t

output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution.

    Computes the gradients of convolution with respect to the input.

    readerSerializeState Source

    Arguments

    :: Tensor v1 ByteString

    reader_handle: Handle to a Reader.

    -> Tensor Value ByteString

    state

    Produce a string tensor that encodes the state of a Reader.

Not all Readers support being serialized, so this can produce an Unimplemented error.

    temporaryVariable Source

    Arguments

    :: TensorType dtype 
    => Tensor Value dtype

    ref: A reference to the variable tensor.

    Returns a tensor that may be mutated, but only persists within a single step.

This is an experimental op for internal use only and it is possible to use this op in unsafe ways. DO NOT USE unless you fully understand the risks.

It is the caller's responsibility to ensure that ref is eventually passed to a matching DestroyTemporaryVariable op after all other uses have completed.

    Outputs a ref to the tensor state so it may be read or modified.

E.g.
    var = state_ops._temporary_variable([1, 2], types.float_)
    var_name = var.op.name
    var = state_ops.assign(var, [[4.0, 5.0]])
    var = state_ops.assign_add(var, [[6.0, 7.0]])
    final = state_ops._destroy_temporary_variable(var, var_name=var_name)

    cropAndResize Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both image_height and image_width need to be positive.

-> Tensor v2 Float

boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

    -> Tensor v3 Int32

box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

-> Tensor v4 Int32

crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.

    -> Tensor Value Float

    crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

Extracts crops from the input image tensor and bilinearly resizes them (possibly with aspect ratio change) to a common output size specified by crop_size. This is more general than the crop_to_bounding_box op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change.

Returns a tensor with crops from the input image at positions defined at the bounding box locations in boxes. The cropped boxes are all resized (with bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.

    maxPoolGrad Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Float]` t) 
    => Tensor v1 t

    orig_input: The original input tensor.

    -> Tensor v2 t

    orig_output: The original output tensor.

    -> Tensor v3 t

    grad: 4-D. Gradients w.r.t. the output of max_pool.

    -> Tensor Value t

    output: Gradients w.r.t. the input to max_pool.

    Computes gradients of the maxpooling function.

    fusedResizeAndPadConv2D Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

    -> Tensor v2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor v3 Int32

paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

-> Tensor v4 t

filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

    -> Tensor Value t

    output

    Performs a resize and padding as a preprocess during a convolution.

It's often possible to do spatial transformations more efficiently as part of the packing stage of a convolution, so this op allows for an optimized implementation where these stages are fused together. This prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, and we can get some latency gains by merging the transformation calculations. The data_format attribute for Conv2D isn't supported by this op, and defaults to NHWC order.

Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this operator is primarily an optimization to minimize memory usage.

    randomUniform Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64]` t, TensorType dtype, OneOf `[Word16, Double, Float]` dtype) 
    => Tensor v1 t

    shape: The shape of the output tensor.

    -> Tensor Value dtype

    output: A tensor of the specified shape filled with uniform random values.

    Outputs random values from a uniform distribution.

The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.

    depthwiseConv2dNative Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input

    -> Tensor v2 t

    filter

    -> Tensor Value t

    output

    Computes a 2-D depthwise convolution given 4-D input and filter tensors.

Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]`, containing in_channels convolutional filters of depth 1, depthwise_conv2d applies a different filter to each input channel (expanding from 1 channel to channel_multiplier channels for each), then concatenates the results together. Thus, the output has `in_channels * channel_multiplier` channels.

for k in 0..in_channels-1
  for q in 0..channel_multiplier-1
    output[b, i, j, k * channel_multiplier + q] =
        sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
                     filter[di, dj, k, q]

Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

    sparseApplyAdadelta Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

var: Should be from a Variable().

    -> Tensor v2 t

    accum: Should be from a Variable().

    -> Tensor v3 t

accum_update: Should be from a Variable().

    -> Tensor v4 t

    lr: Learning rate. Must be a scalar.

    -> Tensor v5 t

    rho: Decay factor. Must be a scalar.

    -> Tensor v6 t

    epsilon: Constant factor. Must be a scalar.

    -> Tensor v7 t

    grad: The gradient.

    -> Tensor v8 tindices

    indices: A vector of indices into the first dimension of var and accum.

    -> Tensor Value t

out: Same as "var".

    depthwiseConv2dNativeBackpropFilter Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

    -> Tensor v2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, where filter is a 4-D `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.

-> Tensor v3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

-> Tensor Value t

output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. the filter input of the convolution.

    Computes the gradients of depthwise convolution with respect to the filter.

    conv3D Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.

    -> Tensor v2 t

filter: Shape `[filter_depth, filter_height, filter_width, in_channels, out_channels]`. in_channels must match between input and filter.

    -> Tensor Value t

    output

    Computes a 3-D convolution given 5-D input and filter tensors.

In signal processing, cross-correlation is a measure of similarity of two waveforms as a function of a time-lag applied to one of them. This is also known as a sliding dot product or sliding inner-product.

    Our Conv3D implements a form of cross-correlation.

    greaterEqual Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value Bool

    z

    Returns the truth value of (x >= y) element-wise.

*NOTE*: GreaterEqual supports broadcasting. More about broadcasting here

    sparseDenseCwiseAdd Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

    -> Tensor v2 t

    sp_values: 1-D. N non-empty values corresponding to sp_indices.

    -> Tensor v3 Int64

    sp_shape: 1-D. Shape of the input SparseTensor.

    -> Tensor v4 t

    dense: R-D. The dense Tensor operand.

    -> Tensor Value t

    output: 1-D. The N values that are operated on.

    Adds up a SparseTensor and a dense Tensor, using these special rules:

1. Broadcasts the dense side to have the same shape as the sparse side, if eligible;
2. Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition.

By these rules, the result is a logical SparseTensor with exactly the same indices and shape, but possibly with different non-zero values. The output of this Op is the resultant non-zero values.

    conv3DBackpropFilter Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    input: Shape `[batch, depth, rows, cols, in_channels]`.

    -> Tensor v2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

    -> Tensor Value t

    output

    Computes the gradients of 3-D convolution with respect to the filter.

    conv3DBackpropInputV2 Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Int32

input_sizes: An integer vector representing the tensor shape of input, where input is a 5-D `[batch, depth, rows, cols, in_channels]` tensor.

-> Tensor v2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

    -> Tensor Value t

    output

    Computes the gradients of 3-D convolution with respect to the input.

    mod Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns element-wise remainder of division.

*NOTE*: Mod supports broadcasting. More about broadcasting here

    refMerge Source

    Arguments

    :: TensorType t 
    => [Tensor v1 t]

    inputs: The input tensors, exactly one of which will become available.

    -> (Tensor Value t, Tensor Value Int32)

    (output, value_index)

    • output: Will be set to the available input tensor.
    • value_index: The index of the chosen input tensor in inputs.

    Forwards the value of an available tensor from inputs to output.

Merge waits for at least one of the tensors in inputs to become available. It is usually combined with Switch to implement branching.

Merge forwards the first tensor to become available to output, and sets value_index to its index in inputs.

    conv3DBackpropFilterV2 Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    input: Shape `[batch, depth, rows, cols, in_channels]`.

    -> Tensor v2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, where filter is a 5-D `[filter_depth, filter_height, filter_width, in_channels, out_channels]` tensor.

-> Tensor v3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

    -> Tensor Value t

    output

    Computes the gradients of 3-D convolution with respect to the filter.

    serializeManySparse Source

    Arguments

    :: TensorType t 
    => Tensor v1 Int64

    sparse_indices: 2-D. The indices of the minibatch SparseTensor.

    -> Tensor v2 t

    sparse_values: 1-D. The values of the minibatch SparseTensor.

    -> Tensor v3 Int64

    sparse_shape: 1-D. The shape of the minibatch SparseTensor.

    -> Tensor Value ByteString

    serialized_sparse

    Serialize an N-minibatch SparseTensor into an `[N, 3]` string Tensor.

The SparseTensor must have rank R greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the SparseTensor must be sorted in increasing order of this first dimension. The serialized SparseTensor objects going into each row of serialized_sparse will have rank `R-1`.

    The minibatch size N is extracted from `sparse_shape[0]`.

    avgPool3DGrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Int32

    orig_input_shape: The original input dimensions.

    -> Tensor v2 t

    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

    -> Tensor Value t

    output: The backprop for input.

    Computes gradients of average pooling function.

    maxPool3DGrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Float

    orig_input: The original input tensor.

    -> Tensor v2 Float

    orig_output: The original output tensor.

    -> Tensor v3 t

    grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

    -> Tensor Value t

    output

    Computes gradients of max pooling function.

    sparseReduceSum Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Int64

input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

    -> Tensor v2 t

    input_values: 1-D. N non-empty values corresponding to input_indices.

    -> Tensor v3 Int64

    input_shape: 1-D. Shape of the input SparseTensor.

    -> Tensor v4 Int32

    reduction_axes: 1-D. Length-K vector containing the reduction axes.

    -> Tensor Value t

    output: `R-K`-D. The reduced Tensor.

    Computes the sum of elements across dimensions of a SparseTensor.

This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In particular, this Op also returns a dense Tensor instead of a sparse one.

Reduces sp_input along the dimensions given in reduction_axes. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_axes. If keep_dims is true, the reduced dimensions are retained with length 1.

If reduction_axes has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.

    relu Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    features

    -> Tensor Value t

    activations

    Computes rectified linear: `max(features, 0)`.

    l2Loss Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    t: Typically 2-D, but may have any dimensions.

    -> Tensor Value t

    output: 0-D.

    L2 Loss.

    Computes half the L2 norm of a tensor without the sqrt:

    output = sum(t ** 2) / 2
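
In pure Haskell the same quantity is (a sketch over a flat list of elements):

```haskell
-- Half the squared L2 norm, without the sqrt.
l2Loss :: [Float] -> Float
l2Loss t = sum (map (^ (2 :: Int)) t) / 2
```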

    readerRestoreState Source

    Arguments

    :: Tensor v1 ByteString

    reader_handle: Handle to a Reader.

    -> Tensor v2 ByteString

state: Result of a ReaderSerializeState of a Reader with type matching reader_handle.

    -> ControlNode 

    Restore a reader to a previously saved state.

Not all Readers support being restored, so this can produce an Unimplemented error.

    shape Source

    Arguments

    :: (TensorType t, TensorType out_type, OneOf `[Int32, Int64]` out_type) 
    => Tensor v1 t

    input

    -> Tensor Value out_type

    output

    Returns the shape of a tensor.

    This operation returns a 1-D integer tensor representing the shape of input.

    For example:

```prettyprint
# t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
shape(t) ==> [2, 2, 3]
```

    softmaxCrossEntropyWithLogits Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    features: batch_size x num_classes matrix

    -> Tensor v2 t

labels: batch_size x num_classes matrix. The caller must ensure that each batch of labels represents a valid probability distribution.

    -> (Tensor Value t, Tensor Value t)

    (loss, backprop)

    • loss: Per example loss (batch_size vector).
    • backprop: backpropagated gradients (batch_size x num_classes matrix).

    Computes softmax cross entropy cost and gradients to backpropagate.

    Inputs are the logits, not probabilities.

    maxPool Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Float]` t) 
    => Tensor v1 t

    input: 4-D input to pool over.

    -> Tensor Value t

    output: The max pooled output tensor.

    Performs max pooling on the input.

    dilation2DBackpropInput Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    input: 4-D with shape `[batch, in_height, in_width, depth]`.

    -> Tensor v2 t

    filter: 3-D with shape `[filter_height, filter_width, depth]`.

    -> Tensor v3 t

    out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

    -> Tensor Value t

    in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`.

    Computes the gradient of morphological 2-D dilation with respect to the input.

    equal Source

    Returns the truth value of (x == y) element-wise.

*NOTE*: Equal supports broadcasting. More about broadcasting here

    dilation2DBackpropFilter Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    input: 4-D with shape `[batch, in_height, in_width, depth]`.

    -> Tensor v2 t

    filter: 3-D with shape `[filter_height, filter_width, depth]`.

    -> Tensor v3 t

    out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

    -> Tensor Value t

    filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`.

    Computes the gradient of morphological 2-D dilation with respect to the filter.

    reluGrad Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    gradients: The backpropagated gradients to the corresponding Relu operation.

    -> Tensor v2 t

features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently).

    -> Tensor Value t

    backprops: `gradients * (features > 0)`.

    Computes rectified linear gradients for a Relu operation.

    relu6 Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    features

    -> Tensor Value t

    activations

    Computes rectified linear 6: `min(max(features, 0), 6)`.

    resizeBicubic Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    images: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor v2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Value Float

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

    Resize images to size using bicubic interpolation.

    Input images can be of different types but output images are always float.

    relu6Grad Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    gradients: The backpropagated gradients to the corresponding Relu6 operation.

    -> Tensor v2 t

    features: The features passed as input to the corresponding Relu6 operation.

    -> Tensor Value t

backprops: The gradients: `gradients * features * (features > 0) * (features < 6)`.

    Computes rectified linear 6 gradients for a Relu6 operation.

    sparseTensorDenseMatMul Source

    Arguments

    :: TensorType t 
    => Tensor v1 Int64

    a_indices: 2-D. The indices of the SparseTensor, size `[nnz, 2]` Matrix.

    -> Tensor v2 t

    a_values: 1-D. The values of the SparseTensor, size `[nnz]` Vector.

    -> Tensor v3 Int64

    a_shape: 1-D. The shape of the SparseTensor, size `[2]` Vector.

    -> Tensor v4 t

    b: 2-D. A dense Matrix.

    -> Tensor Value t

    product

    Multiply SparseTensor (of rank 2) A by dense matrix B.

No validity checking is performed on the indices of A. However, the following input format is recommended for optimal behavior:

if adjoint_a == false:
  A should be sorted in lexicographically increasing order. Use SparseReorder
  if you're not sure.
if adjoint_a == true:
  A should be sorted in order of increasing dimension 1 (i.e., "column major"
  order instead of "row major" order).

    softplus Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    features

    -> Tensor Value t

    activations

    Computes softplus: `log(exp(features) + 1)`.

    batchMatMul Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t) 
    => Tensor v1 t

    x: 3-D or higher with shape `[..., r_x, c_x]`.

    -> Tensor v2 t

    y: 3-D or higher with shape `[..., r_y, c_y]`.

    -> Tensor Value t

    output: 3-D or higher with shape `[..., r_o, c_o]`

    Multiplies slices of two tensors in batches.

Multiplies all slices of Tensor x and y (each slice can be viewed as an element of a batch), and arranges the individual results in a single output tensor of the same batch size. Each of the individual slices can optionally be adjointed (to adjoint a matrix means to transpose and conjugate it) before multiplication by setting the adj_x or adj_y flag to True, which are by default False.

The input tensors x and y are 3-D or higher with shape `[..., r_x, c_x]` and `[..., r_y, c_y]`.

    The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, where:

r_o = c_x if adj_x else r_x
c_o = r_y if adj_y else c_y

    It is computed as:

    output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
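
A single slice of this product can be sketched in pure Haskell on nested lists (illustration only; the complex-conjugation part of adjointing is omitted here):

```haskell
import Data.List (transpose)

-- One batch slice: optionally transpose x and/or y (the real-valued part
-- of "adjoint"), then do an ordinary matrix multiply.
matmulSlice :: Num a => Bool -> Bool -> [[a]] -> [[a]] -> [[a]]
matmulSlice adjX adjY x y =
  [ [ sum (zipWith (*) row col) | col <- transpose y' ] | row <- x' ]
  where
    x' = if adjX then transpose x else x
    y' = if adjY then transpose y else y
```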

    softsignGrad Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    gradients: The backpropagated gradients to the corresponding softsign operation.

    -> Tensor v2 t

    features: The features passed as input to the corresponding softsign operation.

    -> Tensor Value t

    backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`.

    Computes softsign gradients for a softsign operation.

    lessEqual Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value Bool

    z

    Returns the truth value of (x <= y) element-wise.

*NOTE*: LessEqual supports broadcasting. More about broadcasting here

    logSoftmax Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    logits: 2-D with shape `[batch_size, num_classes]`.

    -> Tensor Value t

    logsoftmax: Same shape as logits.

    Computes log softmax activations.

    For each batch i and class j we have

    logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
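
The per-row formula in pure Haskell (a naive sketch; a production version would subtract the row maximum first for numerical stability):

```haskell
-- logsoftmax[j] = logits[j] - log(sum(exp(logits))) for one row.
logSoftmaxRow :: [Double] -> [Double]
logSoftmaxRow logits = map (subtract lse) logits
  where lse = log (sum (map exp logits))
```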

    inTopK Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64]` t) 
    => Int64

    k: Number of top elements to look at for computing precision.

    -> Tensor v1 Float

    predictions: A batch_size x classes tensor.

    -> Tensor v2 t

    targets: A batch_size vector of class ids.

    -> Tensor Value Bool

    precision: Computed Precision at k as a `bool Tensor`.

    Says whether the targets are in the top K predictions.

This outputs a batch_size bool array, an entry `out[i]` is true if the prediction for the target class is among the top k predictions among all predictions for example i. Note that the behavior of InTopK differs from the TopK op in its handling of ties; if multiple classes have the same prediction value and straddle the top-k boundary, all of those classes are considered to be in the top k.

    More formally, let

\(predictions_i\) be the predictions for all classes for example i,
\(targets_i\) be the target class for example i,
\(out_i\) be the output for example i,

    $$out_i = predictions_{i, targets_i} in TopKIncludingTies(predictions_i)$$
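
The ties-included definition above reduces to counting strictly larger predictions; a pure Haskell sketch (assuming a valid target index):

```haskell
-- True iff the target's prediction is in the top k, counting boundary ties
-- as "in": fewer than k classes score strictly higher than the target.
inTopK :: Int -> [Double] -> Int -> Bool
inTopK k predictions target =
  length (filter (> predictions !! target) predictions) < k
```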

    matrixDiag Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    diagonal: Rank k, where `k >= 1`.

    -> Tensor Value t

    output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.

    Returns a batched diagonal tensor with a given batched diagonal values.

Given a diagonal, this operation returns a tensor with the diagonal and everything else padded with zeros. The diagonal is computed as follows:

Assume diagonal has k dimensions `[I, J, K, ..., N]`, then the output is a tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:

    `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.

    For example:

```prettyprint
# diagonal is [[1, 2, 3, 4], [5, 6, 7, 8]]

and diagonal.shape = (2, 4)

tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
                               [0, 2, 0, 0]
                               [0, 0, 3, 0]
                               [0, 0, 0, 4]],
                              [[5, 0, 0, 0]
                               [0, 6, 0, 0]
                               [0, 0, 7, 0]
                               [0, 0, 0, 8]]]

which has shape (2, 4, 4)
```

    maxPool3D Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

    -> Tensor Value t

    output: The max pooled output tensor.

    Performs 3D max pooling on the input.

    topK Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Int64

    k: Number of top elements to look for along the last dimension (along each row for matrices).

    -> Tensor v1 t

    input: 1-D or higher with last dimension at least k.

    -> (Tensor Value t, Tensor Value Int32)

    (values, indices)

    • values: The k largest elements along each last dimensional slice.
    • indices: The indices of values within the last dimension of input.

    Finds values and indices of the k largest elements for the last dimension.

    If the input is a vector (rank-1), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the j-th largest entry in input, and its index is `indices[j]`.

    For matrices (resp. higher rank input), computes the top k entries in each row (resp. vector along the last dimension). Thus,

    values.shape = indices.shape = input.shape[:-1] + [k]

    If two elements are equal, the lower-index element appears first.

    If k varies dynamically, use TopKV2 below.
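
    A sketch of the corresponding Haskell call (same import assumptions as the other sketches in this file); k is a static Int64 attribute and the two result tensors come back as a pair:

    ```haskell
    import Data.Int (Int32)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- The 3 largest values along each last-dimension slice, with indices.
    top3 :: Tensor v1 Float -> (Tensor Value Float, Tensor Value Int32)
    top3 = TF.topK 3
    ```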

    topKV2 Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    input: 1-D or higher with last dimension at least k.

    -> Tensor v2 Int32

    k: 0-D. Number of top elements to look for along the last dimension (along each row for matrices).

    -> (Tensor Value t, Tensor Value Int32)

    (values, indices)

    • values: The k largest elements along each last dimensional slice.
    • indices: The indices of values within the last dimension of input.

    Finds values and indices of the k largest elements for the last dimension.

    If the input is a vector (rank-1), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the j-th largest entry in input, and its index is `indices[j]`.

    For matrices (resp. higher rank input), computes the top k entries in each row (resp. vector along the last dimension). Thus,

    values.shape = indices.shape = input.shape[:-1] + [k]

    If two elements are equal, the lower-index element appears first.

    This is the same as TopK, but takes k as an input rather than an attr.
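
    For contrast with topK above, a sketch where k is a scalar Int32 tensor and can therefore be computed elsewhere in the graph (imports assumed as in the other sketches):

    ```haskell
    import Data.Int (Int32)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- k is an ordinary graph tensor here, not a static attribute.
    topKOf :: Tensor v1 Float -> Tensor v2 Int32 -> (Tensor Value Float, Tensor Value Int32)
    topKOf input k = TF.topKV2 input k
    ```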

    fractionalMaxPool Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
    => Tensor v1 t

    value: 4-D with shape `[batch, height, width, channels]`.

    -> (Tensor Value t, Tensor Value Int64, Tensor Value Int64)

    (output, row_pooling_sequence, col_pooling_sequence)

    • output: output tensor after fractional max pooling.
    • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
    • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

    Performs fractional max pooling on the input.

    Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer.

    The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries.

    First we define the following:

    1. input_row_length : the number of rows from the input set
    2. output_row_length : which will be smaller than the input
    3. alpha = input_row_length / output_row_length : our reduction ratio
    4. K = floor(alpha)
    5. row_pooling_sequence : this is the result list of pool boundary rows

    Then, row_pooling_sequence should satisfy:

    1. a[0] = 0 : the first value of the sequence is 0
    2. a[end] = input_row_length : the last value of the sequence is the size
    3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
    4. length(row_pooling_sequence) = output_row_length+1

    For more details on fractional max pooling, see this paper: Benjamin Graham, Fractional Max-Pooling

    matrixBandPart Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input: Rank k tensor.

    -> Tensor v2 Int64

    num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire lower triangle.

    -> Tensor v3 Int64

    num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep entire upper triangle.

    -> Tensor Value t

    band: Rank k tensor of the same shape as input. The extracted banded tensor.

    Copy a tensor setting everything outside a central band in each innermost matrix to zero.

    The band part is computed as follows: Assume input has k dimensions `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where

    `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.

    The indicator function `in_band(m, n)` is one if `(num_lower < 0 || (m-n) <= num_lower) && (num_upper < 0 || (n-m) <= num_upper)`, and zero otherwise.

    For example:

    ```prettyprint
    # if input is [[ 0,  1,  2, 3]
                   [-1,  0,  1, 2]
                   [-2, -1,  0, 1]
                   [-3, -2, -1, 0]],

    tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
                                           [-1,  0,  1, 2]
                                           [ 0, -1,  0, 1]
                                           [ 0,  0, -1, 0]],

    tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
                                          [-1,  0,  1, 0]
                                          [-2, -1,  0, 1]
                                          [ 0, -2, -1, 0]]
    ```

    Useful special cases:

    ```prettyprint
    tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
    tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
    tf.matrix_band_part(input, 0, 0) ==> Diagonal.
    ```
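
    A hedged Haskell sketch of the call shape; the two scalar Int64 tensors are taken as parameters, so the caller chooses the band (num_lower = num_upper = 1 would keep the tridiagonal part):

    ```haskell
    import Data.Int (Int64)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- band = bandOf input numLower numUpper; a negative scalar keeps
    -- the whole triangle on that side.
    bandOf :: Tensor v1 Float -> Tensor v2 Int64 -> Tensor v3 Int64 -> Tensor Value Float
    bandOf = TF.matrixBandPart
    ```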

    decodeRaw Source

    Arguments

    :: (TensorType out_type, OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` out_type) 
    => Tensor v1 ByteString

    bytes: All the elements must have the same length.

    -> Tensor Value out_type

    output: A Tensor with one more dimension than the input bytes. The added dimension will have size equal to the length of the elements of bytes divided by the number of bytes to represent out_type.

    Reinterpret the bytes of a string as a vector of numbers.
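
    A minimal sketch showing that out_type is selected purely by the result type annotation (imports assumed as in the other sketches):

    ```haskell
    import Data.ByteString (ByteString)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- Reinterprets each string's bytes as a vector of Float values; the
    -- Float in the signature is what instantiates out_type.
    bytesToFloats :: Tensor v1 ByteString -> Tensor Value Float
    bytesToFloats = TF.decodeRaw
    ```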

    decodeJSONExample Source

    Arguments

    :: Tensor v1 ByteString

    json_examples: Each string is a JSON object serialized according to the JSON mapping of the Example proto.

    -> Tensor Value ByteString

    binary_examples: Each string is a binary Example protocol buffer corresponding to the respective element of json_examples.

    Convert JSON-encoded Example records to binary protocol buffer strings.

    This op translates a tensor containing Example records, encoded using the standard JSON mapping, into a tensor containing the same records encoded as binary protocol buffers. The resulting tensor can then be fed to any of the other Example-parsing ops.

    truncatedNormal Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64]` t, TensorType dtype, OneOf `[Word16, Double, Float]` dtype) 
    => Tensor v1 t

    shape: The shape of the output tensor.

    -> Tensor Value dtype

    output: A tensor of the specified shape filled with random truncated normal values.

    Outputs random values from a truncated normal distribution.

    The generated values follow a normal distribution with mean 0 and standard deviation 1, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.
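
    A minimal sketch (imports assumed as in the other sketches); the shape tensor is a parameter and the Float annotation picks the output dtype:

    ```haskell
    import Data.Int (Int32)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- A tensor of the given shape, filled with truncated-normal samples.
    truncNormal :: Tensor v1 Int32 -> Tensor Value Float
    truncNormal = TF.truncatedNormal
    ```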

    randomShuffle Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    value: The tensor to be shuffled.

    -> Tensor Value t

    output: A tensor of same shape and type as value, shuffled along its first dimension.

    Randomly shuffles a tensor along its first dimension.

    The tensor is shuffled along dimension 0, such that each `value[j]` is mapped to one and only one `output[i]`. For example, a mapping that might occur for a 3x2 tensor is:

    ```prettyprint
    [[1, 2],       [[5, 6],
     [3, 4],  ==>   [1, 2],
     [5, 6]]        [3, 4]]
    ```

    multinomial Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log probabilities for all classes.

    -> Tensor v2 Int32

    num_samples: 0-D. Number of independent samples to draw for each row slice.

    -> Tensor Value Int64

    output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` contains the drawn class labels with range `[0, num_classes)`.

    Draws samples from a multinomial distribution.

    randomGamma Source

    Arguments

    :: (TensorType s, OneOf `[Int32, Int64]` s, TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 s

    shape: 1-D integer tensor. Shape of independent samples to draw from each distribution described by the shape parameters given in alpha.

    -> Tensor v2 t

    alpha: A tensor in which each scalar is a "shape" parameter describing the associated gamma distribution.

    -> Tensor Value t

    output: A tensor with shape `shape + shape(alpha)`. Each slice `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.

    Outputs random values from the Gamma distribution(s) described by alpha.

    This op uses the algorithm by Marsaglia et al. to acquire samples via transformation-rejection from pairs of uniform and normal random variables. See http://dl.acm.org/citation.cfm?id=358414

    addN Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => [Tensor v1 t]

    inputs: Must all be the same size and shape.

    -> Tensor Value t

    sum

    Add all input tensors element-wise.
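
    A sketch of the list-valued input in the Haskell binding (imports assumed as in the other sketches):

    ```haskell
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- Sums any number of same-shaped tensors in a single op, typically
    -- cheaper than folding with a binary add.
    sumAll :: [Tensor v1 Float] -> Tensor Value Float
    sumAll = TF.addN
    ```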

    max Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    input: The tensor to reduce.

    -> Tensor v2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Value t

    output: The reduced tensor.

    Computes the maximum of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.
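
    A sketch of the Haskell call shape (imports assumed as in the other sketches); reduction_indices is an ordinary tensor argument, e.g. a 1-D Int32 tensor holding [0] to reduce over the first dimension:

    ```haskell
    import Data.Int (Int32)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- Maximum over the dimensions named in reductionIndices.
    maxOver :: Tensor v1 Float -> Tensor v2 Int32 -> Tensor Value Float
    maxOver = TF.max
    ```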

    _Retval Source

    Arguments

    :: TensorType t 
    => Int64

    index: This return value is the index-th return value of the function.

    -> Tensor v1 t

    input: The return value.

    -> ControlNode 

    A graph node which represents a return value of a function.

    destroyTemporaryVariable Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    ref: A reference to the temporary variable tensor.

    -> Tensor Value t

    value

    Destroys the temporary variable and returns its final value.

    Sets output to the value of the Tensor pointed to by ref, then destroys the temporary variable called var_name. All other uses of ref *must* have executed before this op. This is typically achieved by chaining the ref through each assign op, or by using control dependencies.

    Outputs the final value of the tensor pointed to by ref.

    cast Source

    Arguments

    :: (TensorType dstT, TensorType srcT) 
    => Tensor v1 srcT

    x

    -> Tensor Value dstT

    y

    Cast x of type SrcT to y of DstT.
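
    A sketch (imports assumed as in the other sketches); both SrcT and DstT are inferred, so the type annotation alone drives the conversion:

    ```haskell
    import Data.Int (Int32)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- Element-wise Int32 -> Float conversion.
    toFloat :: Tensor v1 Int32 -> Tensor Value Float
    toFloat = TF.cast
    ```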

    countUpTo Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64]` t) 
    => Int64

    limit: If incrementing ref would bring it above limit, instead generates an OutOfRange error.

    -> Tensor v1 t

    ref: Should be from a scalar Variable node.

    -> Tensor Value t

    output: A copy of the input before increment. If nothing else modifies the input, the values produced will all be distinct.

    Increments ref until it reaches limit.

    This operation outputs "ref" after the update is done. This makes it easier to chain operations that need to use the updated value.

    abs Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes the absolute value of a tensor.

    Given a tensor x, this operation returns a tensor containing the absolute value of each element in x. For example, if x is an input element and y is an output element, this operation computes \(y = |x|\).

    neg Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes numerical negative value element-wise.

    I.e., \(y = -x\).

    sparseSparseMaximum Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Int64

    a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

    -> Tensor v2 t

    a_values: 1-D. N non-empty values corresponding to a_indices.

    -> Tensor v3 Int64

    a_shape: 1-D. Shape of the input SparseTensor.

    -> Tensor v4 Int64

    b_indices: counterpart to a_indices for the other operand.

    -> Tensor v5 t

    b_values: counterpart to a_values for the other operand; must be of the same dtype.

    -> Tensor v6 Int64

    b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

    -> (Tensor Value Int64, Tensor Value t)

    (output_indices, output_values)

    • output_indices: 2-D. The indices of the output SparseTensor.
    • output_values: 1-D. The values of the output SparseTensor.

    Returns the element-wise max of two SparseTensors.

    Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

    invGrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Computes the gradient for the inverse of x wrt its input.

    Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is the corresponding input gradient.

    sqrt Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes square root of x element-wise.

    I.e., \(y = \sqrt{x} = x^{1/2}\).

    matrixInverse Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input: Shape is `[..., M, M]`.

    -> Tensor Value t

    output: Shape is `[..., M, M]`.

    Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).

    The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the inverse for all input submatrices `[..., :, :]`.

    The op uses LU decomposition with partial pivoting to compute the inverses.

    If a matrix is not invertible there is no guarantee what the op does. It may detect the condition and raise an exception or it may simply return a garbage result.

    sqrtGrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Computes the gradient for the sqrt of x wrt its input.

    Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and dy is the corresponding input gradient.

    expandDims Source

    Arguments

    :: (TensorType t, TensorType tdim, OneOf `[Int32, Int64]` tdim) 
    => Tensor v1 t

    input

    -> Tensor v2 tdim

    dim: 0-D (scalar). Specifies the dimension index at which to expand the shape of input.

    -> Tensor Value t

    output: Contains the same data as input, but its shape has an additional dimension of size 1 added.

    Inserts a dimension of 1 into a tensor's shape.

    Given a tensor input, this operation inserts a dimension of 1 at the dimension index dim of input's shape. The dimension index dim starts at zero; if you specify a negative number for dim it is counted backward from the end.

    This operation is useful if you want to add a batch dimension to a single element. For example, if you have a single image of shape `[height, width, channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, which will make the shape `[1, height, width, channels]`.

    Other examples:

    ```prettyprint
    # t is a tensor of shape [2]
    shape(expand_dims(t, 0)) ==> [1, 2]
    shape(expand_dims(t, 1)) ==> [2, 1]
    shape(expand_dims(t, -1)) ==> [2, 1]

    # t2 is a tensor of shape [2, 3, 5]
    shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
    shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
    shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
    ```

    This operation requires that:

    `-1-input.dims() <= dim <= input.dims()`

    This operation is related to `squeeze()`, which removes dimensions of size 1.
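
    A sketch of the Haskell call (imports assumed as in the other sketches), with dim as a scalar Int32 tensor parameter; dim = 0 turns a [height, width, channels] image into a batch of one:

    ```haskell
    import Data.Int (Int32)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- Inserts a size-1 dimension at the index held by `dim`.
    addDim :: Tensor v1 Float -> Tensor v2 Int32 -> Tensor Value Float
    addDim = TF.expandDims
    ```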

    all Source

    Arguments

    :: (TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 Bool

    input: The tensor to reduce.

    -> Tensor v2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Value Bool

    output: The reduced tensor.

    Computes the "logical and" of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

    cTCBeamSearchDecoder Source

    Arguments

    :: Int64

    beam_width: A scalar >= 0 (beam search beam width).

    -> Int64

    top_paths: A scalar >= 0, <= beam_width (controls output size).

    -> Tensor v1 Float

    inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

    -> Tensor v2 Int32

    sequence_length: A vector containing sequence lengths, size `(batch)`.

    -> ([Tensor Value Int64], [Tensor Value Int64], [Tensor Value Int64], Tensor Value Float)

    (decoded_indices, decoded_values, decoded_shape, log_probability)

    • decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, size `(total_decoded_outputs[j] x 2)`, has indices of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
    • decoded_values: A list (length: top_paths) of values vectors. Vector j, size `(length total_decoded_outputs[j])`, has the values of a `SparseTensor<int64, 2>`. The vector stores the decoded classes for beam j.
    • decoded_shape: A list (length: top_paths) of shape vectors. Vector j, size `(2)`, stores the shape of the decoded `SparseTensor[j]`. Its values are: `[batch_size, max_decoded_length[j]]`.
    • log_probability: A matrix, shaped: `(batch_size x top_paths)`. The sequence log-probabilities.

    Performs beam search decoding on the logits given in input.

    A note about the attribute merge_repeated: For the beam search decoder, this means that if consecutive entries in a beam are the same, only the first of these is emitted. That is, when the top path is "A B B B B", "A B" is returned if merge_repeated = True but "A B B B B" is returned if merge_repeated = False.

    rsqrt Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes reciprocal of square root of x element-wise.

    I.e., \(y = 1 / \sqrt{x}\).

    tanhGrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Computes the gradient for the tanh of x wrt its input.

    Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and dy is the corresponding input gradient.

    sin Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes sin of x element-wise.

    matrixDeterminant Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input: Shape is `[..., M, M]`.

    -> Tensor Value t

    output: Shape is `[...]`.

    Computes the determinant of one or more square matrices.

    The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor containing the determinants for all input submatrices `[..., :, :]`.

    cos Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes cos of x element-wise.

    batchToSpace Source

    Arguments

    :: (TensorType t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Int64

    block_size

    -> Tensor v1 t

    input: 4-D tensor with shape `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]`. Note that the batch size of the input tensor must be divisible by `block_size * block_size`.

    -> Tensor v2 tidx

    crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies how many elements to crop from the intermediate result across the spatial dimensions as follows:

    crops = [[crop_top, crop_bottom], [crop_left, crop_right]]

    -> Tensor Value t

    output: 4-D with shape `[batch, height, width, depth]`, where:

    height = height_pad - crop_top - crop_bottom
    width = width_pad - crop_left - crop_right

    The attr block_size must be greater than one. It indicates the block size.

    Some examples:

    1. For the following input of shape `[4, 1, 1, 1]` and block_size of 2:

    ```prettyprint
    [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
    ```

    The output tensor has shape `[1, 2, 2, 1]` and value:

    ```prettyprint
    x = [[[[1], [2]], [[3], [4]]]]
    ```

    2. For the following input of shape `[4, 1, 1, 3]` and block_size of 2:

    ```prettyprint
    [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
    ```

    The output tensor has shape `[1, 2, 2, 3]` and value:

    ```prettyprint
    x = [[[[1, 2, 3], [4, 5, 6]],
          [[7, 8, 9], [10, 11, 12]]]]
    ```

    3. For the following input of shape `[4, 2, 2, 1]` and block_size of 2:

    ```prettyprint
    x = [[[[1], [3]], [[5], [7]]],
         [[[2], [4]], [[10], [12]]],
         [[[5], [7]], [[13], [15]]],
         [[[6], [8]], [[14], [16]]]]
    ```

    The output tensor has shape `[1, 4, 4, 1]` and value:

    ```prettyprint
    x = [[[1], [2], [3], [4]],
         [[5], [6], [7], [8]],
         [[9], [10], [11], [12]],
         [[13], [14], [15], [16]]]
    ```

    4. For the following input of shape `[8, 1, 2, 1]` and block_size of 2:

    ```prettyprint
    x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
         [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
    ```

    The output tensor has shape `[2, 2, 4, 1]` and value:

    ```prettyprint
    x = [[[[1], [3]], [[5], [7]]],
         [[[2], [4]], [[10], [12]]],
         [[[5], [7]], [[13], [15]]],
         [[[6], [8]], [[14], [16]]]]
    ```

    BatchToSpace for 4-D tensors of type T.

    This is a legacy version of the more general BatchToSpaceND.

    Rearranges (permutes) data from batch into blocks of spatial data, followed by cropping. This is the reverse transformation of SpaceToBatch. More specifically, this op outputs a copy of the input tensor where values from the batch dimension are moved in spatial blocks to the height and width dimensions, followed by cropping along the height and width dimensions.

    sparseToDense Source

    Arguments

    :: (TensorType t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 tindices

    sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed.

    -> Tensor v2 tindices

    output_shape: 1-D. Shape of the dense output tensor.

    -> Tensor v3 t

    sparse_values: 1-D. Values corresponding to each row of sparse_indices, or a scalar value to be used for all sparse indices.

    -> Tensor v4 t

    default_value: Scalar value to set for indices not specified in sparse_indices.

    -> Tensor Value t

    dense: Dense output tensor of shape output_shape.

    Converts a sparse representation into a dense tensor.

    Builds an array dense with shape output_shape such that

    ```prettyprint
    # If sparse_indices is scalar
    dense[i] = (i == sparse_indices ? sparse_values : default_value)

    # If sparse_indices is a vector, then for each i
    dense[sparse_indices[i]] = sparse_values[i]

    # If sparse_indices is an n by d matrix, then for each i in [0, n)
    dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
    ```

    All other values in dense are set to default_value. If sparse_values is a scalar, all sparse indices are set to this single value.

    Indices should be sorted in lexicographic order, and indices must not contain any repeats. If validate_indices is true, these properties are checked during execution.
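
    A sketch of the four-argument call shape in Haskell (imports assumed as in the other sketches):

    ```haskell
    import Data.Int (Int32)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- Scatters (index, value) pairs into a dense tensor of the given
    -- shape, filling unset positions with the default value.
    toDense :: Tensor v1 Int32  -- ^ sparse_indices
            -> Tensor v2 Int32  -- ^ output_shape
            -> Tensor v3 Float  -- ^ sparse_values
            -> Tensor v4 Float  -- ^ default_value
            -> Tensor Value Float
    toDense = TF.sparseToDense
    ```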

    asin Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes asin of x element-wise.

    argMin Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    input

    -> Tensor v2 tidx

    dimension: int32, 0 <= dimension < rank(input). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0.

    -> Tensor Value Int64

    output

    Returns the index with the smallest value across dimensions of a tensor.

    isInf Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value Bool

    y

    Returns which elements of x are Inf.

    sign Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Returns an element-wise indication of the sign of a number.

    `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.

    For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

    add Source

    Returns x + y element-wise.

    *NOTE*: Add supports broadcasting. AddN does not. More about broadcasting here

    sparseApplyFtrl Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    var: Should be from a Variable().

    -> Tensor v2 t

    accum: Should be from a Variable().

    -> Tensor v3 t

    linear: Should be from a Variable().

    -> Tensor v4 t

    grad: The gradient.

    -> Tensor v5 tindices

    indices: A vector of indices into the first dimension of var and accum.

    -> Tensor v6 t

    lr: Scaling factor. Must be a scalar.

    -> Tensor v7 t

    l1: L1 regularization. Must be a scalar.

    -> Tensor v8 t

    l2: L2 regularization. Must be a scalar.

    -> Tensor v9 t

    lr_power: Scaling factor. Must be a scalar.

    -> Tensor Value t

    out: Same as "var".

    Update relevant entries in '*var' according to the Ftrl-proximal scheme.

    That is for rows we have grad for, we update var, accum and linear as follows:

    accum_new = accum + grad * grad
    linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
    quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
    var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
    accum = accum_new

    sub Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns x - y element-wise.

    *NOTE*: Sub supports broadcasting. More about broadcasting here

    batchFFT3D Source

    Arguments

    :: Tensor v1 (Complex Float)

    input

    -> Tensor Value (Complex Float)

    output

    sparseReduceSumSparse Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Int64

    input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

    -> Tensor v2 t

    input_values: 1-D. N non-empty values corresponding to input_indices.

    -> Tensor v3 Int64

    input_shape: 1-D. Shape of the input SparseTensor.

    -> Tensor v4 Int32

    reduction_axes: 1-D. Length-K vector containing the reduction axes.

    -> (Tensor Value Int64, Tensor Value t, Tensor Value Int64)

    (output_indices, output_values, output_shape)

    • output_indices
    • output_values
    • output_shape

    Computes the sum of elements across dimensions of a SparseTensor.

    This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a SparseTensor.

    Reduces sp_input along the dimensions given in reduction_axes. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_axes. If keep_dims is true, the reduced dimensions are retained with length 1.

    If reduction_axes has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.

    biasAdd Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    value: Any number of dimensions.

    -> Tensor v2 t

    bias: 1-D with size the last dimension of value.

    -> Tensor Value t

    output: Broadcasted sum of value and bias.

    Adds bias to value.

    This is a special case of `tf.add` where bias is restricted to be 1-D. Broadcasting is supported, so value may have any number of dimensions.

    mul Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns x * y element-wise.

    *NOTE*: Mul supports broadcasting. More about broadcasting here

    div Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns x / y element-wise.

    *NOTE*: Div supports broadcasting. More about broadcasting here

    loopCond Source

    Arguments

    :: Tensor v1 Bool

    input: A boolean scalar, representing the branch predicate of the Switch op.

    -> Tensor Value Bool

    output: The same tensor as input.

    Forwards the input to the output.

    This operator represents the loop termination condition used by the "pivot" switches of a loop.

    squaredDifference Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns (x - y)(x - y) element-wise.

    *NOTE*: SquaredDifference supports broadcasting. More about broadcasting here

    maximum Source

    Arguments

    :: (TensorType t, OneOf `[Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns the max of x and y (i.e. x > y ? x : y) element-wise.

    *NOTE*: Maximum supports broadcasting. More about broadcasting here

    logUniformCandidateSampler Source

    Arguments

    :: Int64

    num_sampled: Number of candidates to randomly sample per batch.

    -> Int64

    num_true: Number of true labels per context.

    -> Int64

    range_max: The sampler will sample integers from the interval [0, range_max).

    -> Bool

    unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

    -> Tensor v1 Int64

    true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

    -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

    (sampled_candidates, true_expected_count, sampled_expected_count)

    • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
    • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
    • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

    Generates labels for candidate sampling with a log-uniform distribution.

    See explanations of candidate sampling and the data formats at go/candidate-sampling.

    For each batch, this op picks a single set of sampled candidate labels.

    The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

    less Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value Bool

    z

    Returns the truth value of (x < y) element-wise.

    *NOTE*: Less supports broadcasting. More about broadcasting here

    pow Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Computes the power of one value to another.

    Given a tensor x and a tensor y, this operation computes \(x^y\) for corresponding elements in x and y. For example:

    ```
    # tensor x is [[2, 2], [3, 3]]
    # tensor y is [[8, 16], [2, 3]]
    tf.pow(x, y) ==> [[256, 65536], [9, 27]]
    ```

    igammac Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    a

    -> Tensor v2 t

    x

    -> Tensor Value t

    z

    Compute the upper regularized incomplete Gamma function `Q(a, x)`.

    The upper regularized incomplete Gamma function is defined as:

    ```
    Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)
    ```

    where

    ```
    Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt
    ```

    is the upper incomplete Gamma function.

    Note, above `P(a, x)` (Igamma) is the lower regularized incomplete Gamma function.

    igamma Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    a

    -> Tensor v2 t

    x

    -> Tensor Value t

    z

    Compute the lower regularized incomplete Gamma function `P(a, x)`.

    The lower regularized incomplete Gamma function is defined as:

    ```
    P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)
    ```

    where

    ```
    gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt
    ```

    is the lower incomplete Gamma function.

    Note, above `Q(a, x)` (Igammac) is the upper regularized incomplete Gamma function.

    zeta Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    q

    -> Tensor Value t

    z

    Compute the Hurwitz zeta function \(\zeta(x, q)\).

    The Hurwitz zeta function is defined as:

    ```
    zeta(x, q) = sum_{n=0}^{infty} (q + n)^{-x}
    ```

    imag Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float]` t, TensorType tout, OneOf `[Double, Float]` tout) 
    => Tensor v1 t

    input

    -> Tensor Value tout

    output

    Returns the imaginary part of a complex number.

    Given a tensor input of complex numbers, this operation returns a tensor of type float that is the imaginary part of each element in input. All elements in input must be complex numbers of the form \(a + bj\), where *a* is the real part and *b* is the imaginary part returned by this operation.

    For example:

    ```
    # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j]
    tf.imag(input) ==> [4.75, 5.75]
    ```

    complex Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t, TensorType tout, OneOf `[Complex Double, Complex Float]` tout) 
    => Tensor v1 t

    real

    -> Tensor v2 t

    imag

    -> Tensor Value tout

    out

    Converts two real numbers to a complex number.

    Given a tensor real representing the real part of a complex number, and a tensor imag representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \(a + bj\), where *a* represents the real part and *b* represents the imag part.

    The input tensors real and imag must have the same shape.

    For example:

    ```
    # tensor real is [2.25, 3.25]
    # tensor imag is [4.75, 5.75]
    tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
    ```

    notEqual Source

    Returns the truth value of (x != y) element-wise.

    *NOTE*: NotEqual supports broadcasting. More about broadcasting here

    complexAbs Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float]` t, TensorType tout, OneOf `[Double, Float]` tout) 
    => Tensor v1 t

    x

    -> Tensor Value tout

    y

    Computes the complex absolute value of a tensor.

    Given a tensor x of complex numbers, this operation returns a tensor of type float or double that is the absolute value of each element in x. All elements in x must be complex numbers of the form \(a + bj\). The absolute value is computed as \(\sqrt{a^2 + b^2}\).

    For example:

    ```
    # tensor x is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
    tf.complex_abs(x) ==> [5.25594902, 6.60492229]
    ```

    logicalAnd Source

    Arguments

    :: Tensor v1 Bool

    x

    -> Tensor v2 Bool

    y

    -> Tensor Value Bool

    z

    Returns the truth value of x AND y element-wise.

    *NOTE*: LogicalAnd supports broadcasting. More about broadcasting here

    batchFFT Source

    Arguments

    :: Tensor v1 (Complex Float)

    input

    -> Tensor Value (Complex Float)

    output

    select Source

    Arguments

    :: TensorType t 
    => Tensor v1 Bool

    condition

    -> Tensor v2 t

    t: = A Tensor which may have the same shape as condition. If condition is rank 1, t may have higher rank, but its first dimension must match the size of condition.

    -> Tensor v3 t

    e: = A Tensor with the same type and shape as t.

    -> Tensor Value t

    output: = A Tensor with the same type and shape as t and e.

    Selects elements from t or e, depending on condition.

    The t and e tensors must all have the same shape, and the output will also have that shape. The condition tensor must be a scalar if t and e are scalars. If t and e are vectors or higher rank, then condition must be either a vector with size matching the first dimension of t, or must have the same shape as t.

    The condition tensor acts as a mask that chooses, based on the value at each element, whether the corresponding element / row in the output should be taken from t (if true) or e (if false).

    If condition is a vector and t and e are higher rank matrices, then it chooses which row (outer dimension) to copy from t and e. If condition has the same shape as t and e, then it chooses which element to copy from t and e.

    For example:

    ```prettyprint
    # condition tensor is [[True,  False]
    #                      [False, True]]
    # t is [[1, 2],
    #       [3, 4]]
    # e is [[5, 6],
    #       [7, 8]]
    select(condition, t, e) ==> [[1, 6],
                                 [7, 4]]

    # condition tensor is [True, False]
    # t is [[1, 2],
    #       [3, 4]]
    # e is [[5, 6],
    #       [7, 8]]
    select(condition, t, e) ==> [[1, 2],
                                 [7, 8]]
    ```

    matMul Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t) 
    => Tensor v1 t

    a

    -> Tensor v2 t

    b

    -> Tensor Value t

    product

    Multiply the matrix "a" by the matrix "b".

    The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transpose_b is true).

    *Note*: The default kernel implementation for MatMul on GPUs uses cublas.

    digamma Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes Psi, the derivative of Lgamma (the log of the absolute value of `Gamma(x)`), element-wise.

    conv2DBackpropFilter Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

    -> Tensor v2 Int32

    filter_sizes: An integer vector representing the tensor shape of filter, where filter is a 4-D `[filter_height, filter_width, in_channels, out_channels]` tensor.

    -> Tensor v3 t

    out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

    -> Tensor Value t

    output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. the filter input of the convolution.

    Computes the gradients of convolution with respect to the filter.

    min Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    input: The tensor to reduce.

    -> Tensor v2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Value t

    output: The reduced tensor.

    Computes the minimum of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

    isFinite Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value Bool

    y

    Returns which elements of x are finite.

    argMax Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    input

    -> Tensor v2 tidx

    dimension: int32, 0 <= dimension < rank(input). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0.

    -> Tensor Value Int64

    output

    Returns the index with the largest value across dimensions of a tensor.

    segmentMean Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    data

    -> Tensor v2 tindices

    segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated.

    -> Tensor Value t

    output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

    Computes the mean along segments of a tensor.

    Read the section on Segmentation for an explanation of segments.

    Computes a tensor such that \(output_i = \frac{\sum_j data_j}{N}\) where mean is over j such that `segment_ids[j] == i` and N is the total number of values summed.

    [Image: ../../images/SegmentMean.png]

    cumprod Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    x

    -> Tensor v2 tidx

    axis

    -> Tensor Value t

    out

    Compute the cumulative product of the tensor x along axis.

    By default, this op performs an inclusive cumprod, which means that the first element of the input is identical to the first element of the output:

    ```prettyprint
    tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
    ```

    By setting the exclusive kwarg to True, an exclusive cumprod is performed instead:

    ```prettyprint
    tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
    ```

    By setting the reverse kwarg to True, the cumprod is performed in the opposite direction:

    ```prettyprint
    tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
    ```

    This is more efficient than using separate `tf.reverse` ops.

    The reverse and exclusive kwargs can also be combined:

    ```prettyprint
    tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
    ```

    segmentMin Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    data

    -> Tensor v2 tindices

    segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated.

    -> Tensor Value t

    output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

    Computes the minimum along segments of a tensor.

    Read the section on Segmentation for an explanation of segments.

    Computes a tensor such that \(output_i = \min_j(data_j)\) where min is over j such that `segment_ids[j] == i`.

    [Image: ../../images/SegmentMin.png]

    unsortedSegmentSum Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    data

    -> Tensor v2 tindices

    segment_ids: A tensor whose shape is a prefix of `data.shape`.

    -> Tensor v3 Int32

    num_segments

    -> Tensor Value t

    output: Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size num_segments.

    Computes the sum along segments of a tensor.

    Read the section on Segmentation for an explanation of segments.

    Computes a tensor such that `output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such that `segment_ids[j...] == i`. Unlike SegmentSum, segment_ids need not be sorted and need not cover all values in the full range of valid values.

    If the sum is empty for a given segment ID i, `output[i] = 0`.

    num_segments should equal the number of distinct segment IDs.

    [Image: ../../images/UnsortedSegmentSum.png]

    tFRecordReader Source

    Arguments

    :: Tensor Value ByteString

    reader_handle: The handle to reference the Reader.

    A Reader that outputs the records from a TensorFlow Records file.

    sparseSegmentSum Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    data

    -> Tensor v2 tidx

    indices: A 1-D tensor. Has same rank as segment_ids.

    -> Tensor v3 Int32

    segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

    -> Tensor Value t

    output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

    Computes the sum along sparse segments of a tensor.

    Read the section on Segmentation for an explanation of segments.

    Like SegmentSum, but segment_ids can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by indices.

    For example:

    ```prettyprint
    c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

    # Select two rows, one segment.
    tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
      ==> [[0 0 0 0]]

    # Select two rows, two segments.
    tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
      ==> [[ 1  2  3  4]
           [-1 -2 -3 -4]]

    # Select all rows, two segments.
    tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
      ==> [[0 0 0 0]
           [5 6 7 8]]

    # Which is equivalent to:
    tf.segment_sum(c, tf.constant([0, 0, 1]))
    ```

    sparseSegmentSqrtN Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    data

    -> Tensor v2 tidx

    indices: A 1-D tensor. Has same rank as segment_ids.

    -> Tensor v3 Int32

    segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

    -> Tensor Value t

    output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

    Computes the sum along sparse segments of a tensor divided by the sqrt of N.

    N is the size of the segment being reduced.

    Read the section on Segmentation for an explanation of segments.

    copyHost Source

    Arguments

    :: TensorType t 
    => Tensor v1 t

    input: Input tensor.

    -> Tensor Value t

    output: Output tensor, deep-copied from input.

    Copy Host Op.

    Performs CPU-to-CPU deep-copying of tensor.

    Unlike the Copy Op, this op has HostMemory constraint on its input or output.

    variable Source

    Arguments

    :: TensorType dtype 
    => Tensor Value dtype

    ref: A reference to the variable tensor.

    Holds state in the form of a tensor that persists across steps.

    Outputs a ref to the tensor state so it may be read or modified. TODO(zhifengc/mrry): Adds a pointer to a more detail document about sharing states in tensorflow.

    sparseSegmentSqrtNGrad Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    grad: gradient propagated to the SparseSegmentSqrtN op.

    -> Tensor v2 tidx

    indices: indices passed to the corresponding SparseSegmentSqrtN op.

    -> Tensor v3 Int32

    segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.

    -> Tensor v4 Int32

    output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.

    -> Tensor Value t

    output

    Computes gradients for SparseSegmentSqrtN.

    Returns tensor "output" with same shape as grad, except for dimension 0 whose + value is output_dim0.

    range Source

    Arguments

    :: (TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 tidx

    start: 0-D (scalar). First entry in the sequence.

    -> Tensor v2 tidx

    limit: 0-D (scalar). Upper limit of sequence, exclusive.

    -> Tensor v3 tidx

    delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

    -> Tensor Value tidx

    output: 1-D.

    Creates a sequence of integers.

    This operation creates a sequence of integers that begins at start and extends by increments of delta up to but not including limit.

    For example:

    ```
    # start is 3
    # limit is 18
    # delta is 3
    tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
    ```
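
    A sketch of the Haskell call (imports assumed as in the other sketches); all three arguments are scalar tensors of the same integer type:

    ```haskell
    import Data.Int (Int32)
    import TensorFlow.Tensor (Tensor, Value)
    import qualified TensorFlow.GenOps.Core as TF

    -- With start = 3, limit = 18, delta = 3 this yields [3, 6, 9, 12, 15].
    rangeOf :: Tensor v1 Int32 -> Tensor v2 Int32 -> Tensor v3 Int32 -> Tensor Value Int32
    rangeOf = TF.range
    ```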

    any Source

    Arguments

    :: (TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 Bool

    input: The tensor to reduce.

    -> Tensor v2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Value Bool

    output: The reduced tensor.

    Computes the "logical or" of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

    linSpace Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    start: First entry in the range.

    -> Tensor v2 t

    stop: Last entry in the range.

    -> Tensor v3 tidx

    num: Number of values to generate.

    -> Tensor Value t

    output: 1-D. The generated values.

    Generates values in an interval.

    A sequence of num evenly-spaced values are generated beginning at start. If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`, so that the last one is exactly stop.

    For example:

    ```
    tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
    ```

    resizeArea Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    images: 4-D with shape `[batch, height, width, channels]`.

    -> Tensor v2 Int32

    size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

    -> Tensor Value Float

    resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

    Resize images to size using area interpolation.

    Input images can be of different types but output images are always float.

    real Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float]` t, TensorType tout, OneOf `[Double, Float]` tout) 
    => Tensor v1 t

    input

    -> Tensor Value tout

    output

    Returns the real part of a complex number.

    Given a tensor input of complex numbers, this operation returns a tensor of type float that is the real part of each element in input. All elements in input must be complex numbers of the form \(a + bj\), where *a* is the real part returned by this operation and *b* is the imaginary part.

    For example:

    ```
    # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j]
    tf.real(input) ==> [-2.25, 3.25]
    ```

    iFFT Source

    Arguments

    :: Tensor v1 (Complex Float)

    input: A complex64 tensor.

    -> Tensor Value (Complex Float)

    output: A complex64 tensor of the same shape as input. The inner-most dimension of input is replaced with its inverse 1D Fourier Transform.

    Compute the inverse 1-dimensional discrete Fourier Transform over the inner-most dimension of input.

    iFFT3D Source

    Arguments

    :: Tensor v1 (Complex Float)

    input: A complex64 tensor.

    -> Tensor Value (Complex Float)

    output: A complex64 tensor of the same shape as input. The inner-most 3 dimensions of input are replaced with their inverse 3D Fourier Transform.

    Compute the inverse 3-dimensional discrete Fourier Transform over the inner-most 3 dimensions of input.

    cross Source

    Arguments

    :: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    a: A tensor containing 3-element vectors.

    -> Tensor v2 t

    b: Another tensor, of same type and shape as a.

    -> Tensor Value t

    product: Pairwise cross product of the vectors in a and b.

    Compute the pairwise cross product.

a and b must be the same shape; they can either be simple 3-element vectors, or any shape where the innermost dimension is 3. In the latter case, each pair of corresponding 3-element vectors is cross-multiplied independently.
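As a concrete illustration, here is a hypothetical construction sketch; it assumes the companion tensorflow-ops package's TF.vector helper for building constant tensors.

```haskell
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (vector)
import TensorFlow.Tensor (Tensor, Value)

-- Cross product of the x and y unit vectors; running this node should
-- yield [0, 0, 1].
crossExample :: Tensor Value Float
crossExample = CoreOps.cross (TF.vector [1, 0, 0 :: Float])
                             (TF.vector [0, 1, 0])
```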

    cumsum Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    x

    -> Tensor v2 tidx

    axis

    -> Tensor Value t

    out

    Compute the cumulative sum of the tensor x along axis.

By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output:
```prettyprint
tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
```

By setting the exclusive kwarg to True, an exclusive cumsum is performed instead:
```prettyprint
tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
```

By setting the reverse kwarg to True, the cumsum is performed in the opposite direction:
```prettyprint
tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
```
This is more efficient than using separate `tf.reverse` ops.

The reverse and exclusive kwargs can also be combined:
```prettyprint
tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
```
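The exclusive and reverse kwargs above are attributes of the underlying TensorFlow op; the generated signature shown here exposes only x and axis, so it yields the default inclusive, forward cumsum. A hypothetical sketch (assuming tensorflow-ops' TF.vector and TF.scalar helpers):

```haskell
import Data.Int (Int32)
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (scalar, vector)
import TensorFlow.Tensor (Tensor, Value)

-- Inclusive cumsum along axis 0; expected result: [1, 3, 6].
cumsumExample :: Tensor Value Float
cumsumExample = CoreOps.cumsum (TF.vector [1, 2, 3 :: Float])
                               (TF.scalar (0 :: Int32))
```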

    batchIFFT Source

    Arguments

    :: Tensor v1 (Complex Float)

    input

    -> Tensor Value (Complex Float)

    output

    erf Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes the Gauss error function of x element-wise.

    barrierInsertMany Source

    Arguments

    :: TensorType t 
    => Int64

    component_index: The component of the barrier elements that is being assigned.

    -> Tensor v1 ByteString

    handle: The handle to a barrier.

    -> Tensor v2 ByteString

    keys: A one-dimensional tensor of keys, with length n.

    -> Tensor v3 t

values: An any-dimensional tensor of values, which are associated with the respective keys. The 0th dimension must have length n.

    -> ControlNode 

    For each key, assigns the respective value to the specified component.

If a key is not found in the barrier, this operation will create a new incomplete element. If a key is found in the barrier, and the element already has a value at component_index, this operation will fail with INVALID_ARGUMENT, and leave the barrier in an undefined state.

    floor Source

    Arguments

    :: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Returns element-wise largest integer not greater than x.

    batchFFT2D Source

    Arguments

    :: Tensor v1 (Complex Float)

    input

    -> Tensor Value (Complex Float)

    output

    sparseAddGrad Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to the non-empty values of the sum.

    -> Tensor v2 Int64

    a_indices: 2-D. The indices of the SparseTensor A, size `[nnz(A), ndims]`.

    -> Tensor v3 Int64

    b_indices: 2-D. The indices of the SparseTensor B, size `[nnz(B), ndims]`.

    -> Tensor v4 Int64

sum_indices: 2-D. The indices of the sum SparseTensor, size `[nnz(sum), ndims]`.

    -> (Tensor Value t, Tensor Value t)

    (a_val_grad, b_val_grad)

• a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the non-empty values of A.
• b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the non-empty values of B.

    The gradient operator for the SparseAdd op.

The SparseAdd op calculates A + B, where A, B, and the sum are all represented as SparseTensor objects. This op takes in the upstream gradient w.r.t. non-empty values of the sum, and outputs the gradients w.r.t. the non-empty values of A and B.

    sparseAdd Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType treal, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` treal) 
    => Tensor v1 Int64

    a_indices: 2-D. The indices of the first SparseTensor, size `[nnz, ndims]` Matrix.

    -> Tensor v2 t

    a_values: 1-D. The values of the first SparseTensor, size `[nnz]` Vector.

    -> Tensor v3 Int64

    a_shape: 1-D. The shape of the first SparseTensor, size `[ndims]` Vector.

    -> Tensor v4 Int64

    b_indices: 2-D. The indices of the second SparseTensor, size `[nnz, ndims]` Matrix.

    -> Tensor v5 t

    b_values: 1-D. The values of the second SparseTensor, size `[nnz]` Vector.

    -> Tensor v6 Int64

    b_shape: 1-D. The shape of the second SparseTensor, size `[ndims]` Vector.

    -> Tensor v7 treal

thresh: 0-D. The magnitude threshold that determines if an output value/index pair takes space.

    -> (Tensor Value Int64, Tensor Value t, Tensor Value Int64)

    (sum_indices, sum_values, sum_shape)

    • sum_indices
    • sum_values
    • sum_shape

    Adds two SparseTensor objects to produce another SparseTensor.

The input SparseTensor objects' indices are assumed ordered in standard lexicographic order. If this is not the case, before this step run SparseReorder to restore index ordering.

By default, if two values sum to zero at some index, the output SparseTensor would still include that particular location in its index, storing a zero in the corresponding value slot. To override this, callers can specify thresh, indicating that if the sum has a magnitude strictly smaller than thresh, its corresponding value and index would then not be included. In particular, `thresh == 0` (default) means everything is kept and actual thresholding happens only for a positive value.

    In the following shapes, nnz is the count after taking thresh into account.
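The seven-argument layout is easiest to see with a worked sketch. The values below are invented for illustration, and the TF.constant, TF.vector, and TF.scalar helpers are assumed from the companion tensorflow-ops package.

```haskell
import Data.Int (Int64)
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant, scalar, vector)
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))

-- A = [0, 1, 0] and B = [0, 2, 3] as sparse tensors with dense shape [3].
sparseAddExample :: (Tensor Value Int64, Tensor Value Float, Tensor Value Int64)
sparseAddExample = CoreOps.sparseAdd aIx aVal aSh bIx bVal bSh thresh
  where
    aIx    = TF.constant (Shape [1, 1]) [1 :: Int64]     -- A's entry at index 1
    aVal   = TF.vector [1 :: Float]
    aSh    = TF.vector [3 :: Int64]
    bIx    = TF.constant (Shape [2, 1]) [1, 2 :: Int64]  -- B's entries at 1 and 2
    bVal   = TF.vector [2, 3 :: Float]
    bSh    = TF.vector [3 :: Int64]
    thresh = TF.scalar (0 :: Float)                      -- keep every non-zero sum
-- Expected sum: indices [[1], [2]], values [3, 3], shape [3].
```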

    batchCholesky Source

    Arguments

    :: (TensorType t, OneOf `[Double, Float]` t) 
    => Tensor v1 t

    input

    -> Tensor Value t

    output

    dynamicPartition Source

    Arguments

    :: TensorType t 
    => Int64

    num_partitions: The number of partitions to output.

    -> Tensor v1 t

    data

    -> Tensor v2 Int32

    partitions: Any shape. Indices in the range `[0, num_partitions)`.

    -> [Tensor Value t]

    outputs

    Partitions `data` into num_partitions tensors using indices from partitions.

For each index tuple js of size `partitions.ndim`, the slice `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` are placed in `outputs[i]` in lexicographic order of js, and the first dimension of `outputs[i]` is the number of entries in partitions equal to i. In detail,

    outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]

    outputs[i] = pack([data[js, ...] for js if partitions[js] == i])

    `data.shape` must start with `partitions.shape`.

    For example:

```
# Scalar partitions
partitions = 1
num_partitions = 2
data = [10, 20]
outputs[0] = []  # Empty with shape [0, 2]
outputs[1] = [[10, 20]]
```

```
# Vector partitions
partitions = [0, 0, 1, 1, 0]
num_partitions = 2
data = [10, 20, 30, 40, 50]
outputs[0] = [10, 20, 50]
outputs[1] = [30, 40]
```

[Figure: DynamicPartition example (images/DynamicPartition.png)]
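The vector-partitions example translates directly to the wrapper; a hypothetical sketch (assuming tensorflow-ops' TF.vector helper):

```haskell
import Data.Int (Int32, Int64)
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (vector)
import TensorFlow.Tensor (Tensor, Value)

-- Mirrors the vector-partitions example: running the two output nodes
-- should yield [10, 20, 50] and [30, 40].
partitioned :: [Tensor Value Float]
partitioned = CoreOps.dynamicPartition (2 :: Int64)
                                       (TF.vector [10, 20, 30, 40, 50 :: Float])
                                       (TF.vector [0, 0, 1, 1, 0 :: Int32])
```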

    serializeSparse Source

    Arguments

    :: TensorType t 
    => Tensor v1 Int64

    sparse_indices: 2-D. The indices of the SparseTensor.

    -> Tensor v2 t

    sparse_values: 1-D. The values of the SparseTensor.

    -> Tensor v3 Int64

    sparse_shape: 1-D. The shape of the SparseTensor.

    -> Tensor Value ByteString

    serialized_sparse

    Serialize a SparseTensor into a string 3-vector (1-D Tensor) object.

    sparseConcat Source

    Arguments

    :: TensorType t 
    => Int64

concat_dim: Dimension to concatenate along. Must be in range [-rank, rank), where rank is the number of dimensions in each input SparseTensor.

    -> [Tensor v1 Int64]

    indices: 2-D. Indices of each input SparseTensor.

    -> [Tensor v2 t]

    values: 1-D. Non-empty values of each SparseTensor.

    -> [Tensor v3 Int64]

    shapes: 1-D. Shapes of each SparseTensor.

    -> (Tensor Value Int64, Tensor Value t, Tensor Value Int64)

    (output_indices, output_values, output_shape)

    • output_indices: 2-D. Indices of the concatenated SparseTensor.
    • output_values: 1-D. Non-empty values of the concatenated SparseTensor.
    • output_shape: 1-D. Shape of the concatenated SparseTensor.

    Concatenates a list of SparseTensor along the specified dimension.

Concatenation is with respect to the dense versions of these sparse tensors. It is assumed that each input is a SparseTensor whose elements are ordered along increasing dimension number.

All inputs' shapes must match, except for the concat dimension. The indices, values, and shapes lists must have the same length.

The output shape is identical to the inputs', except along the concat dimension, where it is the sum of the inputs' sizes along that dimension.

The output elements will be resorted to preserve the sort order along increasing dimension number.

This op runs in `O(M log M)` time, where M is the total number of non-empty values across all inputs. This is due to the need for an internal sort in order to concatenate efficiently across an arbitrary dimension.

    For example, if `concat_dim = 1` and the inputs are

sp_inputs[0]: shape = [2, 3]
[0, 2]: "a"
[1, 0]: "b"
[1, 1]: "c"

sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"

    then the output will be

shape = [2, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[1, 1]: "c"

    Graphically this is equivalent to doing

[    a] concat [  d e  ] = [    a   d e  ]
[b c  ]        [       ]   [b c          ]
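A hypothetical Haskell sketch of the same call shape, using numeric values instead of strings (assumes tensorflow-ops' TF.constant and TF.vector helpers):

```haskell
import Data.Int (Int64)
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant, vector)
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))

-- Concatenate a sparse [2, 3] and a sparse [2, 4] tensor along dimension 1.
concatExample :: (Tensor Value Int64, Tensor Value Float, Tensor Value Int64)
concatExample = CoreOps.sparseConcat 1 [ixA, ixB] [valA, valB] [shA, shB]
  where
    ixA  = TF.constant (Shape [1, 2]) [0, 2 :: Int64]  -- one value at (0, 2)
    valA = TF.vector [1 :: Float]
    shA  = TF.vector [2, 3 :: Int64]
    ixB  = TF.constant (Shape [1, 2]) [0, 1 :: Int64]  -- one value at (0, 1)
    valB = TF.vector [2 :: Float]
    shB  = TF.vector [2, 4 :: Int64]
-- Expected: dense shape [2, 7], with values at (0, 2) and (0, 4).
```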

    segmentProd Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 t

    data

    -> Tensor v2 tindices

segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated.

    -> Tensor Value t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

    Computes the product along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \prod_j data_j\), where the product is over j such that `segment_ids[j] == i`.

[Figure: SegmentProd example (images/SegmentProd.png)]
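A hypothetical sketch (assuming tensorflow-ops' TF.vector helper):

```haskell
import Data.Int (Int32)
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (vector)
import TensorFlow.Tensor (Tensor, Value)

-- Segment 0 covers [2, 3] and segment 1 covers [4];
-- expected result: [6, 4].
segmentProdExample :: Tensor Value Float
segmentProdExample = CoreOps.segmentProd (TF.vector [2, 3, 4 :: Float])
                                         (TF.vector [0, 0, 1 :: Int32])
```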

    sparseReshape Source

    Arguments

    :: Tensor v1 Int64

input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a SparseTensor.

    -> Tensor v2 Int64

    input_shape: 1-D. R_in vector with the input SparseTensor's dense shape.

    -> Tensor v3 Int64

    new_shape: 1-D. R_out vector with the requested new dense shape.

    -> (Tensor Value Int64, Tensor Value Int64)

    (output_indices, output_shape)

• output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty values in the output SparseTensor.
• output_shape: 1-D. R_out vector with the full dense shape of the output SparseTensor. This is the same as new_shape but with any -1 dimensions filled in.

    Reshapes a SparseTensor to represent values in a new dense shape.

This operation has the same semantics as reshape on the represented dense tensor. The input_indices are recomputed based on the requested new_shape.

If one component of new_shape is the special value -1, the size of that dimension is computed so that the total dense size remains constant. At most one component of new_shape can be -1. The number of dense elements implied by new_shape must be the same as the number of dense elements originally implied by input_shape.

    Reshaping does not affect the order of values in the SparseTensor.

If the input tensor has rank R_in and N non-empty values, and new_shape has length R_out, then input_indices has shape `[N, R_in]`, input_shape has length R_in, output_indices has shape `[N, R_out]`, and output_shape has length R_out.
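For example, the following hypothetical sketch (assuming tensorflow-ops' TF.constant and TF.vector helpers) reshapes a sparse `[2, 3]` tensor to `[3, -1]`, so the -1 dimension is inferred as 2:

```haskell
import Data.Int (Int64)
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant, vector)
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))

-- One non-empty value at dense position (0, 1), i.e. linear index 1.
reshapeExample :: (Tensor Value Int64, Tensor Value Int64)
reshapeExample = CoreOps.sparseReshape ix shape newShape
  where
    ix       = TF.constant (Shape [1, 2]) [0, 1 :: Int64]
    shape    = TF.vector [2, 3 :: Int64]
    newShape = TF.vector [3, -1 :: Int64]
-- Expected: output_indices [[0, 1]], output_shape [3, 2].
```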

    sparseDenseCwiseMul Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

    -> Tensor v2 t

    sp_values: 1-D. N non-empty values corresponding to sp_indices.

    -> Tensor v3 Int64

    sp_shape: 1-D. Shape of the input SparseTensor.

    -> Tensor v4 t

    dense: R-D. The dense Tensor operand.

    -> Tensor Value t

    output: 1-D. The N values that are operated on.

    Component-wise multiplies a SparseTensor by a dense Tensor.

The output locations corresponding to the implicitly zero elements in the sparse tensor will be zero (i.e., will not take up storage space), regardless of the contents of the dense tensor (even if it is +/-INF, and despite the fact that INF * 0 == NaN).

*Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.
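A hypothetical sketch (assuming tensorflow-ops' TF.constant and TF.vector helpers):

```haskell
import Data.Int (Int64)
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant, vector)
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))

-- Sparse [2, 2] tensor holding 5 at (0, 0) and 6 at (1, 1), multiplied
-- component-wise by the dense matrix [[1, 2], [3, 4]].
cwiseMulExample :: Tensor Value Float
cwiseMulExample = CoreOps.sparseDenseCwiseMul ix vals shape dense
  where
    ix    = TF.constant (Shape [2, 2]) [0, 0, 1, 1 :: Int64]
    vals  = TF.vector [5, 6 :: Float]
    shape = TF.vector [2, 2 :: Int64]
    dense = TF.constant (Shape [2, 2]) [1, 2, 3, 4 :: Float]
-- Expected output values: [5 * 1, 6 * 4] = [5, 24].
```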

    sparseDenseCwiseDiv Source

    Arguments

    :: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

    -> Tensor v2 t

    sp_values: 1-D. N non-empty values corresponding to sp_indices.

    -> Tensor v3 Int64

    sp_shape: 1-D. Shape of the input SparseTensor.

    -> Tensor v4 t

    dense: R-D. The dense Tensor operand.

    -> Tensor Value t

    output: 1-D. The N values that are operated on.

    Component-wise divides a SparseTensor by a dense Tensor.

*Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html new file mode 100644 index 0000000..0e1566a --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - _)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html new file mode 100644 index 0000000..10ea155 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - A)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html new file mode 100644 index 0000000..4a2a07b --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    Index

    abortTensorFlow.GenOps.Core
    absTensorFlow.GenOps.Core
    acosTensorFlow.GenOps.Core
    addTensorFlow.GenOps.Core
    addNTensorFlow.GenOps.Core
    adjustContrastTensorFlow.GenOps.Core
    adjustContrastv2TensorFlow.GenOps.Core
    allTensorFlow.GenOps.Core
    allCandidateSamplerTensorFlow.GenOps.Core
    anyTensorFlow.GenOps.Core
    applyAdadeltaTensorFlow.GenOps.Core
    applyAdagradTensorFlow.GenOps.Core
    applyAdagradDATensorFlow.GenOps.Core
    applyAdamTensorFlow.GenOps.Core
    applyFtrlTensorFlow.GenOps.Core
    applyGradientDescentTensorFlow.GenOps.Core
    applyMomentumTensorFlow.GenOps.Core
    applyProximalAdagradTensorFlow.GenOps.Core
    applyProximalGradientDescentTensorFlow.GenOps.Core
    applyRMSPropTensorFlow.GenOps.Core
    argMaxTensorFlow.GenOps.Core
    argMinTensorFlow.GenOps.Core
    asinTensorFlow.GenOps.Core
    assignTensorFlow.GenOps.Core
    assignAddTensorFlow.GenOps.Core
    assignSubTensorFlow.GenOps.Core
    asStringTensorFlow.GenOps.Core
    atanTensorFlow.GenOps.Core
    audioSummaryTensorFlow.GenOps.Core
    avgPoolTensorFlow.GenOps.Core
    avgPool3DTensorFlow.GenOps.Core
    avgPool3DGradTensorFlow.GenOps.Core
    avgPoolGradTensorFlow.GenOps.Core
    barrierTensorFlow.GenOps.Core
    barrierCloseTensorFlow.GenOps.Core
    barrierIncompleteSizeTensorFlow.GenOps.Core
    barrierInsertManyTensorFlow.GenOps.Core
    barrierReadySizeTensorFlow.GenOps.Core
    batchCholeskyTensorFlow.GenOps.Core
    batchCholeskyGradTensorFlow.GenOps.Core
    batchFFTTensorFlow.GenOps.Core
    batchFFT2DTensorFlow.GenOps.Core
    batchFFT3DTensorFlow.GenOps.Core
    batchIFFTTensorFlow.GenOps.Core
    batchIFFT2DTensorFlow.GenOps.Core
    batchIFFT3DTensorFlow.GenOps.Core
    batchMatMulTensorFlow.GenOps.Core
    batchMatrixBandPartTensorFlow.GenOps.Core
    batchMatrixDeterminantTensorFlow.GenOps.Core
    batchMatrixDiagTensorFlow.GenOps.Core
    batchMatrixDiagPartTensorFlow.GenOps.Core
    batchMatrixInverseTensorFlow.GenOps.Core
    batchMatrixSetDiagTensorFlow.GenOps.Core
    batchMatrixSolveTensorFlow.GenOps.Core
    batchMatrixSolveLsTensorFlow.GenOps.Core
    batchMatrixTriangularSolveTensorFlow.GenOps.Core
    batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
    batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
    batchSelfAdjointEigTensorFlow.GenOps.Core
    batchSelfAdjointEigV2TensorFlow.GenOps.Core
    batchSvdTensorFlow.GenOps.Core
    batchToSpaceTensorFlow.GenOps.Core
    batchToSpaceNDTensorFlow.GenOps.Core
    betaincTensorFlow.GenOps.Core
    biasAddTensorFlow.GenOps.Core
    biasAddGradTensorFlow.GenOps.Core
    biasAddV1TensorFlow.GenOps.Core
    bitcastTensorFlow.GenOps.Core
    broadcastGradientArgsTensorFlow.GenOps.Core
    castTensorFlow.GenOps.Core
    ceilTensorFlow.GenOps.Core
    checkNumericsTensorFlow.GenOps.Core
    choleskyTensorFlow.GenOps.Core
    choleskyGradTensorFlow.GenOps.Core
    complexTensorFlow.GenOps.Core
    complexAbsTensorFlow.GenOps.Core
    computeAccidentalHitsTensorFlow.GenOps.Core
    concatTensorFlow.GenOps.Core
    concatOffsetTensorFlow.GenOps.Core
    conjTensorFlow.GenOps.Core
    constTensorFlow.GenOps.Core
    controlTriggerTensorFlow.GenOps.Core
    conv2DTensorFlow.GenOps.Core
    conv2DBackpropFilterTensorFlow.GenOps.Core
    conv2DBackpropInputTensorFlow.GenOps.Core
    conv3DTensorFlow.GenOps.Core
    conv3DBackpropFilterTensorFlow.GenOps.Core
    conv3DBackpropFilterV2TensorFlow.GenOps.Core
    conv3DBackpropInputTensorFlow.GenOps.Core
    conv3DBackpropInputV2TensorFlow.GenOps.Core
    copyTensorFlow.GenOps.Core
    copyHostTensorFlow.GenOps.Core
    cosTensorFlow.GenOps.Core
    countUpToTensorFlow.GenOps.Core
    cropAndResizeTensorFlow.GenOps.Core
    cropAndResizeGradBoxesTensorFlow.GenOps.Core
    cropAndResizeGradImageTensorFlow.GenOps.Core
    crossTensorFlow.GenOps.Core
    cTCBeamSearchDecoderTensorFlow.GenOps.Core
    cTCGreedyDecoderTensorFlow.GenOps.Core
    cTCLossTensorFlow.GenOps.Core
    cumprodTensorFlow.GenOps.Core
    cumsumTensorFlow.GenOps.Core
    debugIdentityTensorFlow.GenOps.Core
    debugNanCountTensorFlow.GenOps.Core
    decodeBase64TensorFlow.GenOps.Core
    decodeGifTensorFlow.GenOps.Core
    decodeJpegTensorFlow.GenOps.Core
    decodeJSONExampleTensorFlow.GenOps.Core
    decodePngTensorFlow.GenOps.Core
    decodeRawTensorFlow.GenOps.Core
    deleteSessionTensorTensorFlow.GenOps.Core
    depthToSpaceTensorFlow.GenOps.Core
    depthwiseConv2dNativeTensorFlow.GenOps.Core
    depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
    depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
    deserializeManySparseTensorFlow.GenOps.Core
    destroyTemporaryVariableTensorFlow.GenOps.Core
    diagTensorFlow.GenOps.Core
    diagPartTensorFlow.GenOps.Core
    digammaTensorFlow.GenOps.Core
    dilation2DTensorFlow.GenOps.Core
    dilation2DBackpropFilterTensorFlow.GenOps.Core
    dilation2DBackpropInputTensorFlow.GenOps.Core
    divTensorFlow.GenOps.Core
    drawBoundingBoxesTensorFlow.GenOps.Core
    dynamicPartitionTensorFlow.GenOps.Core
    dynamicStitchTensorFlow.GenOps.Core
    editDistanceTensorFlow.GenOps.Core
    eluTensorFlow.GenOps.Core
    eluGradTensorFlow.GenOps.Core
    encodeBase64TensorFlow.GenOps.Core
    encodeJpegTensorFlow.GenOps.Core
    encodePngTensorFlow.GenOps.Core
    enterTensorFlow.GenOps.Core
    equalTensorFlow.GenOps.Core
    erfTensorFlow.GenOps.Core
    erfcTensorFlow.GenOps.Core
    exitTensorFlow.GenOps.Core
    expTensorFlow.GenOps.Core
    expandDimsTensorFlow.GenOps.Core
    extractGlimpseTensorFlow.GenOps.Core
    extractImagePatchesTensorFlow.GenOps.Core
    factTensorFlow.GenOps.Core
    fFTTensorFlow.GenOps.Core
    fFT2DTensorFlow.GenOps.Core
    fFT3DTensorFlow.GenOps.Core
    fIFOQueueTensorFlow.GenOps.Core
    fillTensorFlow.GenOps.Core
    fixedLengthRecordReaderTensorFlow.GenOps.Core
    fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
    floorTensorFlow.GenOps.Core
    fractionalAvgPoolTensorFlow.GenOps.Core
    fractionalAvgPoolGradTensorFlow.GenOps.Core
    fractionalMaxPoolTensorFlow.GenOps.Core
    fractionalMaxPoolGradTensorFlow.GenOps.Core
    fusedResizeAndPadConv2DTensorFlow.GenOps.Core
    gatherTensorFlow.GenOps.Core
    gatherNdTensorFlow.GenOps.Core
    getSessionHandleTensorFlow.GenOps.Core
    getSessionTensorTensorFlow.GenOps.Core
    greaterTensorFlow.GenOps.Core
    greaterEqualTensorFlow.GenOps.Core
    histogramSummaryTensorFlow.GenOps.Core
    hSVToRGBTensorFlow.GenOps.Core
    identityTensorFlow.GenOps.Core
    identityReaderTensorFlow.GenOps.Core
    iFFTTensorFlow.GenOps.Core
    iFFT2DTensorFlow.GenOps.Core
    iFFT3DTensorFlow.GenOps.Core
    igammaTensorFlow.GenOps.Core
    igammacTensorFlow.GenOps.Core
    imagTensorFlow.GenOps.Core
    imageSummaryTensorFlow.GenOps.Core
    immutableConstTensorFlow.GenOps.Core
    initializeTableTensorFlow.GenOps.Core
    initializeTableFromTextFileTensorFlow.GenOps.Core
    inTopKTensorFlow.GenOps.Core
    invTensorFlow.GenOps.Core
    invertPermutationTensorFlow.GenOps.Core
    invGradTensorFlow.GenOps.Core
    isFiniteTensorFlow.GenOps.Core
    isInfTensorFlow.GenOps.Core
    isNanTensorFlow.GenOps.Core
    isVariableInitializedTensorFlow.GenOps.Core
    l2LossTensorFlow.GenOps.Core
    learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
    lessTensorFlow.GenOps.Core
    lessEqualTensorFlow.GenOps.Core
    lgammaTensorFlow.GenOps.Core
    linSpaceTensorFlow.GenOps.Core
    listDiffTensorFlow.GenOps.Core
    logTensorFlow.GenOps.Core
    logicalAndTensorFlow.GenOps.Core
    logicalNotTensorFlow.GenOps.Core
    logicalOrTensorFlow.GenOps.Core
    logSoftmaxTensorFlow.GenOps.Core
    logUniformCandidateSamplerTensorFlow.GenOps.Core
    lookupTableExportTensorFlow.GenOps.Core
    lookupTableFindTensorFlow.GenOps.Core
    lookupTableImportTensorFlow.GenOps.Core
    lookupTableInsertTensorFlow.GenOps.Core
    lookupTableSizeTensorFlow.GenOps.Core
    loopCondTensorFlow.GenOps.Core
    lRNTensorFlow.GenOps.Core
    lRNGradTensorFlow.GenOps.Core
    matchingFilesTensorFlow.GenOps.Core
    matMulTensorFlow.GenOps.Core
    matrixBandPartTensorFlow.GenOps.Core
    matrixDeterminantTensorFlow.GenOps.Core
    matrixDiagTensorFlow.GenOps.Core
    matrixDiagPartTensorFlow.GenOps.Core
    matrixInverseTensorFlow.GenOps.Core
    matrixSetDiagTensorFlow.GenOps.Core
    matrixSolveTensorFlow.GenOps.Core
    matrixSolveLsTensorFlow.GenOps.Core
    matrixTriangularSolveTensorFlow.GenOps.Core
    maxTensorFlow.GenOps.Core
    maximumTensorFlow.GenOps.Core
    maxPoolTensorFlow.GenOps.Core
    maxPool3DTensorFlow.GenOps.Core
    maxPool3DGradTensorFlow.GenOps.Core
    maxPoolGradTensorFlow.GenOps.Core
    maxPoolGradWithArgmaxTensorFlow.GenOps.Core
    maxPoolWithArgmaxTensorFlow.GenOps.Core
    meanTensorFlow.GenOps.Core
    mergeTensorFlow.GenOps.Core
    mergeSummaryTensorFlow.GenOps.Core
    minTensorFlow.GenOps.Core
    minimumTensorFlow.GenOps.Core
    mirrorPadTensorFlow.GenOps.Core
    mirrorPadGradTensorFlow.GenOps.Core
    modTensorFlow.GenOps.Core
    mulTensorFlow.GenOps.Core
    multinomialTensorFlow.GenOps.Core
    negTensorFlow.GenOps.Core
    negTrainTensorFlow.GenOps.Core
    nextIterationTensorFlow.GenOps.Core
    nonMaxSuppressionTensorFlow.GenOps.Core
    noOpTensorFlow.GenOps.Core
    notEqualTensorFlow.GenOps.Core
    oneHotTensorFlow.GenOps.Core
    packTensorFlow.GenOps.Core
    padTensorFlow.GenOps.Core
    paddingFIFOQueueTensorFlow.GenOps.Core
    parameterizedTruncatedNormalTensorFlow.GenOps.Core
    parseTensorTensorFlow.GenOps.Core
    placeholderTensorFlow.GenOps.Core
    placeholderWithDefaultTensorFlow.GenOps.Core
    polygammaTensorFlow.GenOps.Core
    powTensorFlow.GenOps.Core
    priorityQueueTensorFlow.GenOps.Core
    prodTensorFlow.GenOps.Core
    quantizeAndDequantizeTensorFlow.GenOps.Core
    queueCloseTensorFlow.GenOps.Core
    queueSizeTensorFlow.GenOps.Core
    randomCropTensorFlow.GenOps.Core
    randomGammaTensorFlow.GenOps.Core
    randomShuffleTensorFlow.GenOps.Core
    randomShuffleQueueTensorFlow.GenOps.Core
    randomStandardNormalTensorFlow.GenOps.Core
    randomUniformTensorFlow.GenOps.Core
    randomUniformIntTensorFlow.GenOps.Core
    rangeTensorFlow.GenOps.Core
    rankTensorFlow.GenOps.Core
    readerNumRecordsProducedTensorFlow.GenOps.Core
    readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
    readerReadTensorFlow.GenOps.Core
    readerReadUpToTensorFlow.GenOps.Core
    readerResetTensorFlow.GenOps.Core
    readerRestoreStateTensorFlow.GenOps.Core
    readerSerializeStateTensorFlow.GenOps.Core
    readFileTensorFlow.GenOps.Core
    realTensorFlow.GenOps.Core
    reduceJoinTensorFlow.GenOps.Core
    refEnterTensorFlow.GenOps.Core
    refExitTensorFlow.GenOps.Core
    refIdentityTensorFlow.GenOps.Core
    refMergeTensorFlow.GenOps.Core
    refNextIterationTensorFlow.GenOps.Core
    refSelectTensorFlow.GenOps.Core
    refSwitchTensorFlow.GenOps.Core
    reluTensorFlow.GenOps.Core
    relu6TensorFlow.GenOps.Core
    relu6GradTensorFlow.GenOps.Core
    reluGradTensorFlow.GenOps.Core
    reshapeTensorFlow.GenOps.Core
    resizeAreaTensorFlow.GenOps.Core
    resizeBicubicTensorFlow.GenOps.Core
    resizeBilinearTensorFlow.GenOps.Core
    resizeBilinearGradTensorFlow.GenOps.Core
    resizeNearestNeighborTensorFlow.GenOps.Core
    resizeNearestNeighborGradTensorFlow.GenOps.Core
    restoreTensorFlow.GenOps.Core
    restoreSliceTensorFlow.GenOps.Core
    reverseTensorFlow.GenOps.Core
    reverseSequenceTensorFlow.GenOps.Core
    rGBToHSVTensorFlow.GenOps.Core
    rsqrtTensorFlow.GenOps.Core
    rsqrtGradTensorFlow.GenOps.Core
    sampleDistortedBoundingBoxTensorFlow.GenOps.Core
    scalarSummaryTensorFlow.GenOps.Core
    scatterAddTensorFlow.GenOps.Core
    scatterDivTensorFlow.GenOps.Core
    scatterMulTensorFlow.GenOps.Core
    scatterSubTensorFlow.GenOps.Core
    scatterUpdateTensorFlow.GenOps.Core
    segmentMaxTensorFlow.GenOps.Core
    segmentMeanTensorFlow.GenOps.Core
    segmentMinTensorFlow.GenOps.Core
    segmentProdTensorFlow.GenOps.Core
    segmentSumTensorFlow.GenOps.Core
    selectTensorFlow.GenOps.Core
    selfAdjointEigTensorFlow.GenOps.Core
    selfAdjointEigV2TensorFlow.GenOps.Core
    serializeManySparseTensorFlow.GenOps.Core
    serializeSparseTensorFlow.GenOps.Core
    shapeTensorFlow.GenOps.Core
    shapeNTensorFlow.GenOps.Core
    shardedFilenameTensorFlow.GenOps.Core
    shardedFilespecTensorFlow.GenOps.Core
    sigmoidTensorFlow.GenOps.Core
    sigmoidGradTensorFlow.GenOps.Core
    signTensorFlow.GenOps.Core
    sinTensorFlow.GenOps.Core
    sizeTensorFlow.GenOps.Core
    sliceTensorFlow.GenOps.Core
    softmaxTensorFlow.GenOps.Core
    softmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
    softplusTensorFlow.GenOps.Core
    softplusGradTensorFlow.GenOps.Core
    softsignTensorFlow.GenOps.Core
    softsignGradTensorFlow.GenOps.Core
    spaceToBatchTensorFlow.GenOps.Core
    spaceToBatchNDTensorFlow.GenOps.Core
    spaceToDepthTensorFlow.GenOps.Core
    sparseAddTensorFlow.GenOps.Core
    sparseAddGradTensorFlow.GenOps.Core
    sparseApplyAdadeltaTensorFlow.GenOps.Core
    sparseApplyAdagradTensorFlow.GenOps.Core
    sparseApplyAdagradDATensorFlow.GenOps.Core
    sparseApplyFtrlTensorFlow.GenOps.Core
    sparseApplyMomentumTensorFlow.GenOps.Core
    sparseApplyProximalAdagradTensorFlow.GenOps.Core
    sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
    sparseApplyRMSPropTensorFlow.GenOps.Core
    sparseConcatTensorFlow.GenOps.Core
    sparseDenseCwiseAddTensorFlow.GenOps.Core
    sparseDenseCwiseDivTensorFlow.GenOps.Core
    sparseDenseCwiseMulTensorFlow.GenOps.Core
    sparseMatMulTensorFlow.GenOps.Core
    sparseReduceSumTensorFlow.GenOps.Core
    sparseReduceSumSparseTensorFlow.GenOps.Core
    sparseReorderTensorFlow.GenOps.Core
    sparseReshapeTensorFlow.GenOps.Core
    sparseSegmentMeanTensorFlow.GenOps.Core
    sparseSegmentMeanGradTensorFlow.GenOps.Core
    sparseSegmentSqrtNTensorFlow.GenOps.Core
    sparseSegmentSqrtNGradTensorFlow.GenOps.Core
    sparseSegmentSumTensorFlow.GenOps.Core
    sparseSoftmaxTensorFlow.GenOps.Core
    sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
    sparseSparseMaximumTensorFlow.GenOps.Core
    sparseSparseMinimumTensorFlow.GenOps.Core
    sparseSplitTensorFlow.GenOps.Core
    sparseTensorDenseAddTensorFlow.GenOps.Core
    sparseTensorDenseMatMulTensorFlow.GenOps.Core
    sparseToDenseTensorFlow.GenOps.Core
    splitTensorFlow.GenOps.Core
    sqrtTensorFlow.GenOps.Core
    sqrtGradTensorFlow.GenOps.Core
    squareTensorFlow.GenOps.Core
    squaredDifferenceTensorFlow.GenOps.Core
    squeezeTensorFlow.GenOps.Core
    stackCloseTensorFlow.GenOps.Core
    stackPopTensorFlow.GenOps.Core
    stackPushTensorFlow.GenOps.Core
    stopGradientTensorFlow.GenOps.Core
    stridedSliceTensorFlow.GenOps.Core
    stridedSliceAssignTensorFlow.GenOps.Core
    stridedSliceGradTensorFlow.GenOps.Core
    stringJoinTensorFlow.GenOps.Core
    stringSplitTensorFlow.GenOps.Core
    stringToHashBucketTensorFlow.GenOps.Core
    stringToHashBucketFastTensorFlow.GenOps.Core
    stringToHashBucketStrongTensorFlow.GenOps.Core
    stringToNumberTensorFlow.GenOps.Core
    subTensorFlow.GenOps.Core
    sumTensorFlow.GenOps.Core
    svdTensorFlow.GenOps.Core
    switchTensorFlow.GenOps.Core
    tanTensorFlow.GenOps.Core
    tanhTensorFlow.GenOps.Core
    tanhGradTensorFlow.GenOps.Core
    temporaryVariableTensorFlow.GenOps.Core
    tensorArrayCloseTensorFlow.GenOps.Core
    tensorArrayConcatTensorFlow.GenOps.Core
    tensorArrayGatherTensorFlow.GenOps.Core
    tensorArrayGradTensorFlow.GenOps.Core
    tensorArrayPackTensorFlow.GenOps.Core
    tensorArrayReadTensorFlow.GenOps.Core
    tensorArrayScatterTensorFlow.GenOps.Core
    tensorArraySizeTensorFlow.GenOps.Core
    tensorArraySplitTensorFlow.GenOps.Core
    tensorArrayUnpackTensorFlow.GenOps.Core
    tensorArrayWriteTensorFlow.GenOps.Core
    tensorSummaryTensorFlow.GenOps.Core
    textLineReaderTensorFlow.GenOps.Core
    tFRecordReaderTensorFlow.GenOps.Core
    threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
    tileTensorFlow.GenOps.Core
    tileGradTensorFlow.GenOps.Core
    topKTensorFlow.GenOps.Core
    topKV2TensorFlow.GenOps.Core
    transposeTensorFlow.GenOps.Core
    truncatedNormalTensorFlow.GenOps.Core
    uniformCandidateSamplerTensorFlow.GenOps.Core
    uniqueTensorFlow.GenOps.Core
    uniqueWithCountsTensorFlow.GenOps.Core
    unpackTensorFlow.GenOps.Core
    unsortedSegmentSumTensorFlow.GenOps.Core
    variableTensorFlow.GenOps.Core
    where'TensorFlow.GenOps.Core
    wholeFileReaderTensorFlow.GenOps.Core
    zerosLikeTensorFlow.GenOps.Core
    zetaTensorFlow.GenOps.Core
    _ArgTensorFlow.GenOps.Core
    _HostCastTensorFlow.GenOps.Core
    _HostRecvTensorFlow.GenOps.Core
    _HostSendTensorFlow.GenOps.Core
    _RecvTensorFlow.GenOps.Core
    _RetvalTensorFlow.GenOps.Core
    _SendTensorFlow.GenOps.Core
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html new file mode 100644 index 0000000..b7ff19b --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - B)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    Index - B

    barrierTensorFlow.GenOps.Core
    barrierCloseTensorFlow.GenOps.Core
    barrierIncompleteSizeTensorFlow.GenOps.Core
    barrierInsertManyTensorFlow.GenOps.Core
    barrierReadySizeTensorFlow.GenOps.Core
    batchCholeskyTensorFlow.GenOps.Core
    batchCholeskyGradTensorFlow.GenOps.Core
    batchFFTTensorFlow.GenOps.Core
    batchFFT2DTensorFlow.GenOps.Core
    batchFFT3DTensorFlow.GenOps.Core
    batchIFFTTensorFlow.GenOps.Core
    batchIFFT2DTensorFlow.GenOps.Core
    batchIFFT3DTensorFlow.GenOps.Core
    batchMatMulTensorFlow.GenOps.Core
    batchMatrixBandPartTensorFlow.GenOps.Core
    batchMatrixDeterminantTensorFlow.GenOps.Core
    batchMatrixDiagTensorFlow.GenOps.Core
    batchMatrixDiagPartTensorFlow.GenOps.Core
    batchMatrixInverseTensorFlow.GenOps.Core
    batchMatrixSetDiagTensorFlow.GenOps.Core
    batchMatrixSolveTensorFlow.GenOps.Core
    batchMatrixSolveLsTensorFlow.GenOps.Core
    batchMatrixTriangularSolveTensorFlow.GenOps.Core
    batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
    batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
    batchSelfAdjointEigTensorFlow.GenOps.Core
    batchSelfAdjointEigV2TensorFlow.GenOps.Core
    batchSvdTensorFlow.GenOps.Core
    batchToSpaceTensorFlow.GenOps.Core
    batchToSpaceNDTensorFlow.GenOps.Core
    betaincTensorFlow.GenOps.Core
    biasAddTensorFlow.GenOps.Core
    biasAddGradTensorFlow.GenOps.Core
    biasAddV1TensorFlow.GenOps.Core
    bitcastTensorFlow.GenOps.Core
    broadcastGradientArgsTensorFlow.GenOps.Core
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html new file mode 100644 index 0000000..9bed483 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - C)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html new file mode 100644 index 0000000..ef74313 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - D)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    Index - D

    debugIdentityTensorFlow.GenOps.Core
    debugNanCountTensorFlow.GenOps.Core
    decodeBase64TensorFlow.GenOps.Core
    decodeGifTensorFlow.GenOps.Core
    decodeJpegTensorFlow.GenOps.Core
    decodeJSONExampleTensorFlow.GenOps.Core
    decodePngTensorFlow.GenOps.Core
    decodeRawTensorFlow.GenOps.Core
    deleteSessionTensorTensorFlow.GenOps.Core
    depthToSpaceTensorFlow.GenOps.Core
    depthwiseConv2dNativeTensorFlow.GenOps.Core
    depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
    depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
    deserializeManySparseTensorFlow.GenOps.Core
    destroyTemporaryVariableTensorFlow.GenOps.Core
    diagTensorFlow.GenOps.Core
    diagPartTensorFlow.GenOps.Core
    digammaTensorFlow.GenOps.Core
    dilation2DTensorFlow.GenOps.Core
    dilation2DBackpropFilterTensorFlow.GenOps.Core
    dilation2DBackpropInputTensorFlow.GenOps.Core
    divTensorFlow.GenOps.Core
    drawBoundingBoxesTensorFlow.GenOps.Core
    dynamicPartitionTensorFlow.GenOps.Core
    dynamicStitchTensorFlow.GenOps.Core
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html new file mode 100644 index 0000000..1ae3874 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - E)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html new file mode 100644 index 0000000..f511a43 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - F)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html new file mode 100644 index 0000000..3623797 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - G)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html new file mode 100644 index 0000000..12f960e --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - H)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html new file mode 100644 index 0000000..5076cc3 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - I)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html new file mode 100644 index 0000000..df05ab0 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - L)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html new file mode 100644 index 0000000..d43cd4c --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - M)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html new file mode 100644 index 0000000..c2e42d8 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - N)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html new file mode 100644 index 0000000..1f3b4cc --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - O)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html new file mode 100644 index 0000000..ddea867 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - P)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html new file mode 100644 index 0000000..243dcea --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - Q)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html new file mode 100644 index 0000000..4296c9f --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - R)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html new file mode 100644 index 0000000..bce9611 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - S)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    Index - S

    sampleDistortedBoundingBoxTensorFlow.GenOps.Core
    scalarSummaryTensorFlow.GenOps.Core
    scatterAddTensorFlow.GenOps.Core
    scatterDivTensorFlow.GenOps.Core
    scatterMulTensorFlow.GenOps.Core
    scatterSubTensorFlow.GenOps.Core
    scatterUpdateTensorFlow.GenOps.Core
    segmentMaxTensorFlow.GenOps.Core
    segmentMeanTensorFlow.GenOps.Core
    segmentMinTensorFlow.GenOps.Core
    segmentProdTensorFlow.GenOps.Core
    segmentSumTensorFlow.GenOps.Core
    selectTensorFlow.GenOps.Core
    selfAdjointEigTensorFlow.GenOps.Core
    selfAdjointEigV2TensorFlow.GenOps.Core
    serializeManySparseTensorFlow.GenOps.Core
    serializeSparseTensorFlow.GenOps.Core
    shapeTensorFlow.GenOps.Core
    shapeNTensorFlow.GenOps.Core
    shardedFilenameTensorFlow.GenOps.Core
    shardedFilespecTensorFlow.GenOps.Core
    sigmoidTensorFlow.GenOps.Core
    sigmoidGradTensorFlow.GenOps.Core
    signTensorFlow.GenOps.Core
    sinTensorFlow.GenOps.Core
    sizeTensorFlow.GenOps.Core
    sliceTensorFlow.GenOps.Core
    softmaxTensorFlow.GenOps.Core
    softmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
    softplusTensorFlow.GenOps.Core
    softplusGradTensorFlow.GenOps.Core
    softsignTensorFlow.GenOps.Core
    softsignGradTensorFlow.GenOps.Core
    spaceToBatchTensorFlow.GenOps.Core
    spaceToBatchNDTensorFlow.GenOps.Core
    spaceToDepthTensorFlow.GenOps.Core
    sparseAddTensorFlow.GenOps.Core
    sparseAddGradTensorFlow.GenOps.Core
    sparseApplyAdadeltaTensorFlow.GenOps.Core
    sparseApplyAdagradTensorFlow.GenOps.Core
    sparseApplyAdagradDATensorFlow.GenOps.Core
    sparseApplyFtrlTensorFlow.GenOps.Core
    sparseApplyMomentumTensorFlow.GenOps.Core
    sparseApplyProximalAdagradTensorFlow.GenOps.Core
    sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
    sparseApplyRMSPropTensorFlow.GenOps.Core
    sparseConcatTensorFlow.GenOps.Core
    sparseDenseCwiseAddTensorFlow.GenOps.Core
    sparseDenseCwiseDivTensorFlow.GenOps.Core
    sparseDenseCwiseMulTensorFlow.GenOps.Core
    sparseMatMulTensorFlow.GenOps.Core
    sparseReduceSumTensorFlow.GenOps.Core
    sparseReduceSumSparseTensorFlow.GenOps.Core
    sparseReorderTensorFlow.GenOps.Core
    sparseReshapeTensorFlow.GenOps.Core
    sparseSegmentMeanTensorFlow.GenOps.Core
    sparseSegmentMeanGradTensorFlow.GenOps.Core
    sparseSegmentSqrtNTensorFlow.GenOps.Core
    sparseSegmentSqrtNGradTensorFlow.GenOps.Core
    sparseSegmentSumTensorFlow.GenOps.Core
    sparseSoftmaxTensorFlow.GenOps.Core
    sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
    sparseSparseMaximumTensorFlow.GenOps.Core
    sparseSparseMinimumTensorFlow.GenOps.Core
    sparseSplitTensorFlow.GenOps.Core
    sparseTensorDenseAddTensorFlow.GenOps.Core
    sparseTensorDenseMatMulTensorFlow.GenOps.Core
    sparseToDenseTensorFlow.GenOps.Core
    splitTensorFlow.GenOps.Core
    sqrtTensorFlow.GenOps.Core
    sqrtGradTensorFlow.GenOps.Core
    squareTensorFlow.GenOps.Core
    squaredDifferenceTensorFlow.GenOps.Core
    squeezeTensorFlow.GenOps.Core
    stackCloseTensorFlow.GenOps.Core
    stackPopTensorFlow.GenOps.Core
    stackPushTensorFlow.GenOps.Core
    stopGradientTensorFlow.GenOps.Core
    stridedSliceTensorFlow.GenOps.Core
    stridedSliceAssignTensorFlow.GenOps.Core
    stridedSliceGradTensorFlow.GenOps.Core
    stringJoinTensorFlow.GenOps.Core
    stringSplitTensorFlow.GenOps.Core
    stringToHashBucketTensorFlow.GenOps.Core
    stringToHashBucketFastTensorFlow.GenOps.Core
    stringToHashBucketStrongTensorFlow.GenOps.Core
    stringToNumberTensorFlow.GenOps.Core
    subTensorFlow.GenOps.Core
    sumTensorFlow.GenOps.Core
    svdTensorFlow.GenOps.Core
    switchTensorFlow.GenOps.Core
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html new file mode 100644 index 0000000..60771c2 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - T)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html new file mode 100644 index 0000000..eb6bf96 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - U)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html new file mode 100644 index 0000000..57902bb --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - V)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html new file mode 100644 index 0000000..c3ad075 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - W)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html new file mode 100644 index 0000000..564dda7 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - Z)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index.html new file mode 100644 index 0000000..f2397b4 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index.html @@ -0,0 +1,4 @@ +tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index)

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file

[Standard Haddock page assets omitted: frames.html, haddock-util.js (search, frames, and style-menu JavaScript), hslogo-16.png (binary image), and index-frames.html for tensorflow-core-ops-0.1.0.0.]

diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html
new file mode 100644
index 0000000..42c5482
--- /dev/null
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html
@@ -0,0 +1,4 @@
+tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

    Code generated signatures for the Ops in libtensorflow_c.

    Modules

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html new file mode 100644 index 0000000..6d16294 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html @@ -0,0 +1,4 @@ +TensorFlow.GenOps.Core

    TensorFlow.GenOps.Core

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/minus.gif b/docs/haddock/tensorflow-core-ops-0.1.0.0/minus.gif new file mode 100644 index 0000000000000000000000000000000000000000..1deac2fe1a42e35b994f1b855488f392c50f6a89 GIT binary patch literal 56 zcmZ?wbhEHb * { + font-size: 93%; /* 12pt */ +} + +#mini #module-list .caption, +#mini #module-header .caption { + font-size: 125%; /* 15pt */ +} + +#mini #interface h1, +#mini #interface h2, +#mini #interface h3, +#mini #interface h4 { + font-size: 109%; /* 13pt */ + margin: 1em 0 0; +} + +#mini #interface .top, +#mini #interface .src { + margin: 0; +} + +#mini #module-list ul { + list-style: none; + margin: 0; +} + +#alphabet ul { + list-style: none; + padding: 0; + margin: 0.5em 0 0; + text-align: center; +} + +#alphabet li { + display: inline; + margin: 0 0.25em; +} + +#alphabet a { + font-weight: bold; +} + +#index .caption, +#module-list .caption { font-size: 131%; /* 17pt */ } + +#index table { + margin-left: 2em; +} + +#index .src { + font-weight: bold; +} +#index .alt { + font-size: 77%; /* 10pt */ + font-style: italic; + padding-left: 2em; +} + +#index td + td { + padding-left: 1em; +} + +#module-list ul { + list-style: none; + margin: 0 0 0 2em; +} + +#module-list li { + clear: right; +} + +#module-list span.collapser, +#module-list span.expander { + background-position: 0 0.3em; +} + +#module-list .package { + float: right; +} + +/* @end */ diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/plus.gif b/docs/haddock/tensorflow-core-ops-0.1.0.0/plus.gif new file mode 100644 index 0000000000000000000000000000000000000000..2d15c14173d23f664b955cd24f51c82f5f09d91d GIT binary patch literal 59 zcmZ?wbhEHbgbBX M^XE!9f*2UA0nx1yDgXcg literal 0 HcmV?d00001 diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow-GenOps-Core.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow-GenOps-Core.html new file mode 100644 index 0000000..eb0a175 --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow-GenOps-Core.html @@ -0,0 +1,22618 @@ + + + + + +.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/TensorFlow/GenOps/Core.hs + + + +
    {-# LANGUAGE ConstraintKinds #-}
    +{-# LANGUAGE DataKinds #-}
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE RankNTypes #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +module TensorFlow.GenOps.Core where
    +
    +import Data.ByteString (ByteString)
    +import Data.Complex (Complex)
    +import Data.Int (Int8, Int16, Int32, Int64)
    +import Data.Word (Word8, Word16)
    +import Lens.Family2 ((.~), (&))
    +import TensorFlow.Build
    +import TensorFlow.BuildOp
    +import TensorFlow.Tensor
    +import TensorFlow.Types
    +
    +-- | Receives the named tensor from send_device on recv_device.
    +--
+-- _HostRecv produces its output on host memory whereas _Recv produces its
+-- output on device memory.
    +_HostRecv :: forall tensor_type . (TensorType tensor_type) =>
    +             Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
    +             -> Tensor Value tensor_type -- ^ __tensor__: The tensor to receive.
    +_HostRecv send_device_incarnation | eqLengthGuard [] =
    +    buildOp (opDef "_HostRecv"
    +             & opAttr "tensor_type" .~ tensorType (undefined :: tensor_type)
    +             & opAttr "send_device_incarnation" .~ send_device_incarnation)
    +        
    +{-
    +attr { name: "tensor_type" type: "type" }
    +attr {
    +  description: "The name of the tensor to receive."
    +  name: "tensor_name"
    +  type: "string"
    +}
    +attr {
    +  description: "The name of the device sending the tensor."
    +  name: "send_device"
    +  type: "string"
    +}
    +attr {
    +  description: "The current incarnation of send_device."
    +  name: "send_device_incarnation"
    +  type: "int"
    +}
    +attr {
    +  description: "The name of the device receiving the tensor."
    +  name: "recv_device"
    +  type: "string"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
    +  name: "client_terminated"
    +  type: "bool"
    +}
    +output_arg {
    +  description: "The tensor to receive."
    +  name: "tensor"
    +  type_attr: "tensor_type"
    +}
    +-}
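+
+-- All of the wrappers in this module follow the pattern visible in
+-- _HostRecv above: build an OpDef with 'opDef', set its attributes with
+-- 'opAttr' and the lens operators (&) and (.~), and hand any input
+-- tensors to 'buildOp'; 'eqLengthGuard' checks that list-valued
+-- arguments (none here) have consistent lengths.  A minimal sketch of
+-- the pattern for a hypothetical unary op named "MyOp" ("MyOp" is not a
+-- real TensorFlow op; this is an editorial illustration only):
+--
+-- > myOp :: forall v1 t . TensorType t => Tensor v1 t -> Tensor Value t
+-- > myOp input | eqLengthGuard [] =
+-- >     buildOp (opDef "MyOp"
+-- >              & opAttr "T" .~ tensorType (undefined :: t))
+-- >         input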
    +
    +-- | Receives the named tensor from send_device on recv_device.
    +
    +_Recv :: forall tensor_type . (TensorType tensor_type) =>
    +         Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
    +         -> Tensor Value tensor_type -- ^ __tensor__: The tensor to receive.
    +_Recv send_device_incarnation | eqLengthGuard [] =
    +    buildOp (opDef "_Recv"
    +             & opAttr "tensor_type" .~ tensorType (undefined :: tensor_type)
    +             & opAttr "send_device_incarnation" .~ send_device_incarnation)
    +        
    +{-
    +attr { name: "tensor_type" type: "type" }
    +attr {
    +  description: "The name of the tensor to receive."
    +  name: "tensor_name"
    +  type: "string"
    +}
    +attr {
    +  description: "The name of the device sending the tensor."
    +  name: "send_device"
    +  type: "string"
    +}
    +attr {
    +  description: "The current incarnation of send_device."
    +  name: "send_device_incarnation"
    +  type: "int"
    +}
    +attr {
    +  description: "The name of the device receiving the tensor."
    +  name: "recv_device"
    +  type: "string"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
    +  name: "client_terminated"
    +  type: "bool"
    +}
    +output_arg {
    +  description: "The tensor to receive."
    +  name: "tensor"
    +  type_attr: "tensor_type"
    +}
    +-}
    +
    +-- | Sends the named tensor from send_device to recv_device.
    +
    +_Send :: forall v1 t . (TensorType t) =>
    +         Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
    +         -> Tensor v1 t -- ^ __tensor__: The tensor to send.
    +         -> ControlNode
    +_Send send_device_incarnation tensor | eqLengthGuard [] =
    +    buildOp (opDef "_Send"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "send_device_incarnation" .~ send_device_incarnation)
    +        tensor
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "The name of the tensor to send."
    +  name: "tensor_name"
    +  type: "string"
    +}
    +attr {
    +  description: "The name of the device sending the tensor."
    +  name: "send_device"
    +  type: "string"
    +}
    +attr {
    +  description: "The current incarnation of send_device."
    +  name: "send_device_incarnation"
    +  type: "int"
    +}
    +attr {
    +  description: "The name of the device receiving the tensor."
    +  name: "recv_device"
    +  type: "string"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
    +  name: "client_terminated"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "The tensor to send." name: "tensor" type_attr: "T"
    +}
    +-}
    +
    +-- | A graph node which represents an argument to a function.
    +
    +_Arg :: forall t . (TensorType t) =>
    +        Data.Int.Int64 -- ^ __index__: This argument is the index-th argument of the function.
    +        -> Tensor Value t -- ^ __output__: The argument.
    +_Arg index | eqLengthGuard [] =
    +    buildOp (opDef "_Arg"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "index" .~ index)
    +        
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "This argument is the index-th argument of the function."
    +  has_minimum: true
    +  name: "index"
    +  type: "int"
    +}
    +output_arg {
    +  description: "The argument." name: "output" type_attr: "T"
    +}
    +-}
    +
    +-- | Update '*var' according to the RMSProp algorithm.
    +--
+-- Note that in the dense implementation of this algorithm, ms and mom will
+-- update even if the grad is zero, but in this sparse implementation, ms
+-- and mom will not update in iterations during which the grad is zero.
    +-- 
    +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
    +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
    +-- 
    +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
    +-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
    +-- var <- var - mom
    +sparseApplyRMSProp :: forall v1 v2 v3 v4 v5 v6 v7 v8 v9 t
    +                      tindices . (TensorType t,
    +                                  OneOf '[(Data.Complex.Complex Double),
    +                                          (Data.Complex.Complex Float),
    +                                          Data.Int.Int16, Data.Int.Int32,
    +                                          Data.Int.Int64, Data.Int.Int8,
    +                                          Data.Word.Word16, Data.Word.Word8,
    +                                          Double, Float] t, TensorType tindices,
    +                                  OneOf '[Data.Int.Int32,
    +                                          Data.Int.Int64] tindices) =>
    +                      Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                      -> Tensor v2 t -- ^ __ms__: Should be from a Variable().
    +                      -> Tensor v3 t -- ^ __mom__: Should be from a Variable().
    +                      -> Tensor v4 t -- ^ __lr__: Scaling factor. Must be a scalar.
    +                      -> Tensor v5 t -- ^ __rho__: Decay rate. Must be a scalar.
    +                      -> Tensor v6 t -- ^ __momentum__
    +                      -> Tensor v7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
    +                      -> Tensor v8 t -- ^ __grad__: The gradient.
    +                      -> Tensor v9 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
    +                      -> Tensor Value t -- ^ __out__: Same as "var".
    +sparseApplyRMSProp var ms mom lr rho momentum epsilon grad
    +                   indices | eqLengthGuard [] =
    +    buildOp (opDef "SparseApplyRMSProp"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        var ms mom lr rho momentum epsilon grad indices
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "ms"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "mom"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Decay rate. Must be a scalar."
    +  name: "rho"
    +  type_attr: "T"
    +}
    +input_arg { name: "momentum" type_attr: "T" }
    +input_arg {
    +  description: "Ridge term. Must be a scalar."
    +  name: "epsilon"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "A vector of indices into the first dimension of var, ms and mom."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
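+
+-- The update rule above, as a scalar sketch in plain Haskell (an
+-- editorial illustration derived from the comment, not part of the
+-- generated API); one step for a single element:
+--
+-- > rmsPropStep :: Double -> Double -> Double -> Double
+-- >             -> (Double, Double, Double) -> Double
+-- >             -> (Double, Double, Double)
+-- > rmsPropStep lr rho momentum epsilon (var, ms, mom) grad =
+-- >     let ms'  = rho * ms + (1 - rho) * grad * grad
+-- >         mom' = momentum * mom + lr * grad / sqrt (ms' + epsilon)
+-- >     in (var - mom', ms', mom')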
    +
    +-- | Update '*var' according to the Adam algorithm.
    +--
    +-- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
    +-- m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
    +-- v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
    +-- variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
    +applyAdam :: forall v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 t . (TensorType t,
    +                                                        OneOf '[(Data.Complex.Complex Double),
    +                                                                (Data.Complex.Complex Float),
    +                                                                Data.Int.Int16,
    +                                                                Data.Int.Int32,
    +                                                                Data.Int.Int64,
    +                                                                Data.Int.Int8,
    +                                                                Data.Word.Word16,
    +                                                                Data.Word.Word8,
    +                                                                Double,
    +                                                                Float] t) =>
    +             Tensor v1 t -- ^ __var__: Should be from a Variable().
    +             -> Tensor v2 t -- ^ __m__: Should be from a Variable().
    +             -> Tensor v3 t -- ^ __v__: Should be from a Variable().
    +             -> Tensor v4 t -- ^ __beta1_power__: Must be a scalar.
    +             -> Tensor v5 t -- ^ __beta2_power__: Must be a scalar.
    +             -> Tensor v6 t -- ^ __lr__: Scaling factor. Must be a scalar.
    +             -> Tensor v7 t -- ^ __beta1__: Momentum factor. Must be a scalar.
    +             -> Tensor v8 t -- ^ __beta2__: Momentum factor. Must be a scalar.
    +             -> Tensor v9 t -- ^ __epsilon__: Ridge term. Must be a scalar.
    +             -> Tensor v10 t -- ^ __grad__: The gradient.
    +             -> Tensor Value t -- ^ __out__: Same as "var".
    +applyAdam var m v beta1_power beta2_power lr beta1 beta2 epsilon
    +          grad | eqLengthGuard [] =
    +    buildOp (opDef "ApplyAdam"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        var m v beta1_power beta2_power lr beta1 beta2 epsilon grad
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "m"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "v"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Must be a scalar." name: "beta1_power" type_attr: "T"
    +}
    +input_arg {
    +  description: "Must be a scalar." name: "beta2_power" type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Momentum factor. Must be a scalar."
    +  name: "beta1"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Momentum factor. Must be a scalar."
    +  name: "beta2"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Ridge term. Must be a scalar."
    +  name: "epsilon"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
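+
+-- A scalar sketch of the Adam update above (an editorial illustration
+-- derived from the comment, not part of the generated API):
+--
+-- > adamStep :: Double -> Double -> Double -> Double -> Double -> Double
+-- >          -> (Double, Double, Double) -> Double
+-- >          -> (Double, Double, Double)
+-- > adamStep lr beta1 beta2 epsilon beta1Power beta2Power (var, m, v) grad =
+-- >     let lrT = lr * sqrt (1 - beta2Power) / (1 - beta1Power)
+-- >         m'  = beta1 * m + (1 - beta1) * grad
+-- >         v'  = beta2 * v + (1 - beta2) * grad * grad
+-- >     in (var - lrT * m' / (sqrt v' + epsilon), m', v')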
    +
    +-- | Update relevant entries in '*var' and '*accum' according to the momentum scheme.
    +--
    +-- Set use_nesterov = True if you want to use Nesterov momentum.
    +-- 
+-- That is, for the rows for which we have grad, we update var and accum as follows:
    +-- 
    +-- accum = accum * momentum + grad
    +-- var -= lr * accum
    +sparseApplyMomentum :: forall v1 v2 v3 v4 v5 v6 t tindices . (TensorType t,
    +                                                              OneOf '[(Data.Complex.Complex Double),
    +                                                                      (Data.Complex.Complex Float),
    +                                                                      Data.Int.Int16,
    +                                                                      Data.Int.Int32,
    +                                                                      Data.Int.Int64,
    +                                                                      Data.Int.Int8,
    +                                                                      Data.Word.Word16,
    +                                                                      Data.Word.Word8,
    +                                                                      Double,
    +                                                                      Float] t,
    +                                                              TensorType tindices,
    +                                                              OneOf '[Data.Int.Int32,
    +                                                                      Data.Int.Int64] tindices) =>
    +                       Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                       -> Tensor v2 t -- ^ __accum__: Should be from a Variable().
    +                       -> Tensor v3 t -- ^ __lr__: Learning rate. Must be a scalar.
    +                       -> Tensor v4 t -- ^ __grad__: The gradient.
    +                       -> Tensor v5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
    +                       -> Tensor v6 t -- ^ __momentum__: Momentum. Must be a scalar.
    +                       -> Tensor Value t -- ^ __out__: Same as "var".
    +sparseApplyMomentum var accum lr grad indices momentum | eqLengthGuard [] =
    +    buildOp (opDef "SparseApplyMomentum"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        var accum lr grad indices momentum
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
    +  name: "use_nesterov"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Learning rate. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "A vector of indices into the first dimension of var and accum."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "Momentum. Must be a scalar."
    +  name: "momentum"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
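+
+-- A sketch of the sparse update semantics (an editorial illustration,
+-- not part of the generated API): only the rows of var and accum named
+-- by indices are touched, each with the momentum rule above.  With rows
+-- modelled as a list of (var, accum) pairs and the gradient as an
+-- association list keyed by row index:
+--
+-- > sparseMomentumStep :: Double -> Double -> [(Double, Double)]
+-- >                    -> [(Int, Double)] -> [(Double, Double)]
+-- > sparseMomentumStep lr momentum rows grads =
+-- >     [ case lookup i grads of
+-- >         Nothing -> (x, a)
+-- >         Just g  -> let a' = a * momentum + g in (x - lr * a', a')
+-- >     | (i, (x, a)) <- zip [0..] rows ]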
    +
+-- | Update '*var' according to the momentum scheme.
+--
+-- Set use_nesterov = True if you want to use Nesterov momentum.
    +-- 
    +-- accum = accum * momentum + grad
    +-- var -= lr * accum
    +applyMomentum :: forall v1 v2 v3 v4 v5 t . (TensorType t,
    +                                            OneOf '[(Data.Complex.Complex Double),
    +                                                    (Data.Complex.Complex Float),
    +                                                    Data.Int.Int16,
    +                                                    Data.Int.Int32,
    +                                                    Data.Int.Int64,
    +                                                    Data.Int.Int8,
    +                                                    Data.Word.Word16,
    +                                                    Data.Word.Word8, Double,
    +                                                    Float] t) =>
    +                 Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                 -> Tensor v2 t -- ^ __accum__: Should be from a Variable().
    +                 -> Tensor v3 t -- ^ __lr__: Scaling factor. Must be a scalar.
    +                 -> Tensor v4 t -- ^ __grad__: The gradient.
    +                 -> Tensor v5 t -- ^ __momentum__: Momentum. Must be a scalar.
    +                 -> Tensor Value t -- ^ __out__: Same as "var".
    +applyMomentum var accum lr grad momentum | eqLengthGuard [] =
    +    buildOp (opDef "ApplyMomentum"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        var accum lr grad momentum
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
    +  name: "use_nesterov"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "Momentum. Must be a scalar."
    +  name: "momentum"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Update '*var' according to the Ftrl-proximal scheme.
    +--
    +-- accum_new = accum + grad * grad
    +-- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
    +-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
    +-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
    +-- accum = accum_new
    +applyFtrl :: forall v1 v2 v3 v4 v5 v6 v7 v8 t . (TensorType t,
    +                                                 OneOf '[(Data.Complex.Complex Double),
    +                                                         (Data.Complex.Complex Float),
    +                                                         Data.Int.Int16,
    +                                                         Data.Int.Int32,
    +                                                         Data.Int.Int64,
    +                                                         Data.Int.Int8,
    +                                                         Data.Word.Word16,
    +                                                         Data.Word.Word8,
    +                                                         Double, Float] t) =>
    +             Tensor v1 t -- ^ __var__: Should be from a Variable().
    +             -> Tensor v2 t -- ^ __accum__: Should be from a Variable().
    +             -> Tensor v3 t -- ^ __linear__: Should be from a Variable().
    +             -> Tensor v4 t -- ^ __grad__: The gradient.
    +             -> Tensor v5 t -- ^ __lr__: Scaling factor. Must be a scalar.
+             -> Tensor v6 t -- ^ __l1__: L1 regularization. Must be a scalar.
+             -> Tensor v7 t -- ^ __l2__: L2 regularization. Must be a scalar.
    +             -> Tensor v8 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
    +             -> Tensor Value t -- ^ __out__: Same as "var".
    +applyFtrl var accum linear grad lr l1 l2 lr_power | eqLengthGuard [] =
    +    buildOp (opDef "ApplyFtrl"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        var accum linear grad lr l1 l2 lr_power
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "linear"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L1 regulariation. Must be a scalar."
    +  name: "l1"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L2 regulariation. Must be a scalar."
    +  name: "l2"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr_power"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
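+
+-- A scalar sketch of the Ftrl-proximal update above (an editorial
+-- illustration derived from the comment, not part of the generated API):
+--
+-- > ftrlStep :: Double -> Double -> Double -> Double
+-- >          -> (Double, Double, Double) -> Double
+-- >          -> (Double, Double, Double)
+-- > ftrlStep lr l1 l2 lrPower (var, accum, linear) grad =
+-- >     let accumNew  = accum + grad * grad
+-- >         linear'   = linear + grad
+-- >                   + (accumNew ** (-lrPower) - accum ** (-lrPower)) / lr * var
+-- >         quadratic = 1 / (accumNew ** lrPower * lr) + 2 * l2
+-- >         var' | abs linear' > l1 = (signum linear' * l1 - linear') / quadratic
+-- >              | otherwise        = 0
+-- >     in (var', accumNew, linear')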
    +
    +-- | Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
    +
    +sparseApplyAdagradDA :: forall v1 v2 v3 v4 v5 v6 v7 v8 v9 t
    +                        tindices . (TensorType t,
    +                                    OneOf '[(Data.Complex.Complex Double),
    +                                            (Data.Complex.Complex Float),
    +                                            Data.Int.Int16, Data.Int.Int32,
    +                                            Data.Int.Int64, Data.Int.Int8,
    +                                            Data.Word.Word16, Data.Word.Word8,
    +                                            Double, Float] t,
    +                                    TensorType tindices, OneOf '[Data.Int.Int32,
    +                                                                 Data.Int.Int64] tindices) =>
    +                        Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                        -> Tensor v2 t -- ^ __gradient_accumulator__: Should be from a Variable().
    +                        -> Tensor v3 t -- ^ __gradient_squared_accumulator__: Should be from a Variable().
    +                        -> Tensor v4 t -- ^ __grad__: The gradient.
    +                        -> Tensor v5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
    +                        -> Tensor v6 t -- ^ __lr__: Learning rate. Must be a scalar.
    +                        -> Tensor v7 t -- ^ __l1__: L1 regularization. Must be a scalar.
    +                        -> Tensor v8 t -- ^ __l2__: L2 regularization. Must be a scalar.
    +                        -> Tensor v9 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
    +                        -> Tensor Value t -- ^ __out__: Same as "var".
    +sparseApplyAdagradDA var gradient_accumulator gradient_squared_accumulator grad
    +                     indices lr l1 l2 global_step | eqLengthGuard [] =
    +    buildOp (opDef "SparseApplyAdagradDA"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        var gradient_accumulator gradient_squared_accumulator grad indices lr l1
    +        l2 global_step
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "gradient_accumulator"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "gradient_squared_accumulator"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "A vector of indices into the first dimension of var and accum."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "Learning rate. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L1 regularization. Must be a scalar."
    +  name: "l1"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L2 regularization. Must be a scalar."
    +  name: "l2"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Training step number. Must be a scalar."
    +  name: "global_step"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
    +--
+-- That is, for the rows for which we have grad, we update var and accum as follows:
    +-- accum += grad * grad
    +-- var -= lr * grad * (1 / sqrt(accum))
    +sparseApplyAdagrad :: forall v1 v2 v3 v4 v5 t tindices . (TensorType t,
    +                                                          OneOf '[(Data.Complex.Complex Double),
    +                                                                  (Data.Complex.Complex Float),
    +                                                                  Data.Int.Int16,
    +                                                                  Data.Int.Int32,
    +                                                                  Data.Int.Int64,
    +                                                                  Data.Int.Int8,
    +                                                                  Data.Word.Word16,
    +                                                                  Data.Word.Word8,
    +                                                                  Double,
    +                                                                  Float] t,
    +                                                          TensorType tindices,
    +                                                          OneOf '[Data.Int.Int32,
    +                                                                  Data.Int.Int64] tindices) =>
    +                      Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                      -> Tensor v2 t -- ^ __accum__: Should be from a Variable().
    +                      -> Tensor v3 t -- ^ __lr__: Learning rate. Must be a scalar.
    +                      -> Tensor v4 t -- ^ __grad__: The gradient.
    +                      -> Tensor v5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
    +                      -> Tensor Value t -- ^ __out__: Same as "var".
    +sparseApplyAdagrad var accum lr grad indices | eqLengthGuard [] =
    +    buildOp (opDef "SparseApplyAdagrad"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        var accum lr grad indices
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Learning rate. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "A vector of indices into the first dimension of var and accum."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
    +--
    +-- accum += grad * grad
    +-- prox_v = var - lr * grad * (1 / sqrt(accum))
    +-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
    +applyProximalAdagrad :: forall v1 v2 v3 v4 v5 v6 t . (TensorType t,
    +                                                      OneOf '[(Data.Complex.Complex Double),
    +                                                              (Data.Complex.Complex Float),
    +                                                              Data.Int.Int16,
    +                                                              Data.Int.Int32,
    +                                                              Data.Int.Int64,
    +                                                              Data.Int.Int8,
    +                                                              Data.Word.Word16,
    +                                                              Data.Word.Word8,
    +                                                              Double,
    +                                                              Float] t) =>
    +                        Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                        -> Tensor v2 t -- ^ __accum__: Should be from a Variable().
    +                        -> Tensor v3 t -- ^ __lr__: Scaling factor. Must be a scalar.
    +                        -> Tensor v4 t -- ^ __l1__: L1 regularization. Must be a scalar.
    +                        -> Tensor v5 t -- ^ __l2__: L2 regularization. Must be a scalar.
    +                        -> Tensor v6 t -- ^ __grad__: The gradient.
    +                        -> Tensor Value t -- ^ __out__: Same as "var".
    +applyProximalAdagrad var accum lr l1 l2 grad | eqLengthGuard [] =
    +    buildOp (opDef "ApplyProximalAdagrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        var accum lr l1 l2 grad
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L1 regularization. Must be a scalar."
    +  name: "l1"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L2 regularization. Must be a scalar."
    +  name: "l2"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
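+
+-- A scalar sketch of the proximal-adagrad step above, transcribing the
+-- comment directly (an editorial illustration, not part of the
+-- generated API):
+--
+-- > proximalAdagradStep :: Double -> Double -> Double
+-- >                     -> (Double, Double) -> Double -> (Double, Double)
+-- > proximalAdagradStep lr l1 l2 (var, accum) grad =
+-- >     let accum' = accum + grad * grad
+-- >         proxV  = var - lr * grad / sqrt accum'
+-- >         var'   = signum proxV / (1 + lr * l2) * max (abs proxV - lr * l1) 0
+-- >     in (var', accum')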
    +
    +-- | Update '*var' according to the adagrad scheme.
    +--
    +-- accum += grad * grad
    +-- var -= lr * grad * (1 / sqrt(accum))
    +applyAdagrad :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                        OneOf '[(Data.Complex.Complex Double),
    +                                                (Data.Complex.Complex Float),
    +                                                Data.Int.Int16, Data.Int.Int32,
    +                                                Data.Int.Int64, Data.Int.Int8,
    +                                                Data.Word.Word16,
    +                                                Data.Word.Word8, Double,
    +                                                Float] t) =>
    +                Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                -> Tensor v2 t -- ^ __accum__: Should be from a Variable().
    +                -> Tensor v3 t -- ^ __lr__: Scaling factor. Must be a scalar.
    +                -> Tensor v4 t -- ^ __grad__: The gradient.
    +                -> Tensor Value t -- ^ __out__: Same as "var".
    +applyAdagrad var accum lr grad | eqLengthGuard [] =
    +    buildOp (opDef "ApplyAdagrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        var accum lr grad
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
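+
+-- A scalar sketch of the adagrad update above (an editorial
+-- illustration, not part of the generated API); the sparse variant
+-- earlier in this module applies the same rule to the indexed rows only:
+--
+-- > adagradStep :: Double -> (Double, Double) -> Double -> (Double, Double)
+-- > adagradStep lr (var, accum) grad =
+-- >     let accum' = accum + grad * grad
+-- >     in (var - lr * grad / sqrt accum', accum')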
    +
    +-- | Update '*var' according to the adadelta scheme.
    +--
+-- accum = rho * accum + (1 - rho) * grad^2
+-- update = sqrt(accum_update + epsilon) / sqrt(accum + epsilon) * grad
+-- accum_update = rho * accum_update + (1 - rho) * update^2
+-- var -= update
    +applyAdadelta :: forall v1 v2 v3 v4 v5 v6 v7 t . (TensorType t,
    +                                                  OneOf '[(Data.Complex.Complex Double),
    +                                                          (Data.Complex.Complex Float),
    +                                                          Data.Int.Int16,
    +                                                          Data.Int.Int32,
    +                                                          Data.Int.Int64,
    +                                                          Data.Int.Int8,
    +                                                          Data.Word.Word16,
    +                                                          Data.Word.Word8,
    +                                                          Double, Float] t) =>
    +                 Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                 -> Tensor v2 t -- ^ __accum__: Should be from a Variable().
    +                 -> Tensor v3 t -- ^ __accum_update__: Should be from a Variable().
    +                 -> Tensor v4 t -- ^ __lr__: Scaling factor. Must be a scalar.
    +                 -> Tensor v5 t -- ^ __rho__: Decay factor. Must be a scalar.
    +                 -> Tensor v6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
    +                 -> Tensor v7 t -- ^ __grad__: The gradient.
    +                 -> Tensor Value t -- ^ __out__: Same as "var".
    +applyAdadelta var accum accum_update lr rho epsilon grad | eqLengthGuard [] =
    +    buildOp (opDef "ApplyAdadelta"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        var accum accum_update lr rho epsilon grad
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum_update"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Decay factor. Must be a scalar."
    +  name: "rho"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Constant factor. Must be a scalar."
    +  name: "epsilon"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
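+
+-- A scalar sketch of the adadelta update above, transcribing the
+-- comment (an editorial illustration, not part of the generated API;
+-- the lr input additionally scales the applied update):
+--
+-- > adadeltaStep :: Double -> Double -> (Double, Double, Double) -> Double
+-- >              -> (Double, Double, Double)
+-- > adadeltaStep rho epsilon (var, accum, accumUpdate) grad =
+-- >     let accum'       = rho * accum + (1 - rho) * grad * grad
+-- >         update       = sqrt (accumUpdate + epsilon)
+-- >                      / sqrt (accum' + epsilon) * grad
+-- >         accumUpdate' = rho * accumUpdate + (1 - rho) * update * update
+-- >     in (var - update, accum', accumUpdate')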
    +
    +-- | Sparse update '*var' as FOBOS algorithm with fixed learning rate.
    +--
+-- That is, for the rows for which we have grad, we update var as follows:
    +-- prox_v = var - alpha * grad
    +-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
    +sparseApplyProximalGradientDescent :: forall v1 v2 v3 v4 v5 v6 t
    +                                      tindices . (TensorType t,
    +                                                  OneOf '[(Data.Complex.Complex Double),
    +                                                          (Data.Complex.Complex Float),
    +                                                          Data.Int.Int16,
    +                                                          Data.Int.Int32,
    +                                                          Data.Int.Int64,
    +                                                          Data.Int.Int8,
    +                                                          Data.Word.Word16,
    +                                                          Data.Word.Word8,
    +                                                          Double, Float] t,
    +                                                  TensorType tindices,
    +                                                  OneOf '[Data.Int.Int32,
    +                                                          Data.Int.Int64] tindices) =>
    +                                      Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                                      -> Tensor v2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
    +                                      -> Tensor v3 t -- ^ __l1__: L1 regularization. Must be a scalar.
    +                                      -> Tensor v4 t -- ^ __l2__: L2 regularization. Must be a scalar.
    +                                      -> Tensor v5 t -- ^ __grad__: The gradient.
    +                                      -> Tensor v6 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
    +                                      -> Tensor Value t -- ^ __out__: Same as "var".
    +sparseApplyProximalGradientDescent var alpha l1 l2 grad
    +                                   indices | eqLengthGuard [] =
    +    buildOp (opDef "SparseApplyProximalGradientDescent"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        var alpha l1 l2 grad indices
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "alpha"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L1 regularization. Must be a scalar."
    +  name: "l1"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L2 regularization. Must be a scalar."
    +  name: "l2"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "A vector of indices into the first dimension of var and accum."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Update '*var' as FOBOS algorithm with fixed learning rate.
    +--
    +-- prox_v = var - alpha * delta
    +-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
    +applyProximalGradientDescent :: forall v1 v2 v3 v4 v5 t . (TensorType t,
    +                                                           OneOf '[(Data.Complex.Complex Double),
    +                                                                   (Data.Complex.Complex Float),
    +                                                                   Data.Int.Int16,
    +                                                                   Data.Int.Int32,
    +                                                                   Data.Int.Int64,
    +                                                                   Data.Int.Int8,
    +                                                                   Data.Word.Word16,
    +                                                                   Data.Word.Word8,
    +                                                                   Double,
    +                                                                   Float] t) =>
    +                                Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                                -> Tensor v2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
    +                                -> Tensor v3 t -- ^ __l1__: L1 regularization. Must be a scalar.
    +                                -> Tensor v4 t -- ^ __l2__: L2 regularization. Must be a scalar.
    +                                -> Tensor v5 t -- ^ __delta__: The change.
    +                                -> Tensor Value t -- ^ __out__: Same as "var".
    +applyProximalGradientDescent var alpha l1 l2 delta | eqLengthGuard [] =
    +    buildOp (opDef "ApplyProximalGradientDescent"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        var alpha l1 l2 delta
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "alpha"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L1 regularization. Must be a scalar."
    +  name: "l1"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L2 regularization. Must be a scalar."
    +  name: "l2"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The change." name: "delta" type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
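+
+-- A scalar sketch of the FOBOS step above (an editorial illustration,
+-- not part of the generated API); sparseApplyProximalGradientDescent
+-- applies the same per-element rule to the indexed rows only:
+--
+-- > proximalStep :: Double -> Double -> Double -> Double -> Double -> Double
+-- > proximalStep alpha l1 l2 var delta =
+-- >     let proxV = var - alpha * delta
+-- >     in signum proxV / (1 + alpha * l2) * max (abs proxV - alpha * l1) 0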
    +
    +-- | Encode strings into web-safe base64 format.
    +--
    +-- Refer to the following article for more information on base64 format:
    +-- en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
    +-- end so that the encoded string has a length that is a multiple of 4. See the
    +-- Padding section of the link above.
    +-- 
    +-- Web-safe means that the encoder uses - and _ instead of + and /.
    +encodeBase64 :: Tensor v1 Data.ByteString.ByteString -- ^ __input__: Strings to be encoded.
    +                -> Tensor Value Data.ByteString.ByteString -- ^ __output__: Input strings encoded in base64.
    +encodeBase64 input | eqLengthGuard [] =
    +    buildOp (opDef "EncodeBase64")
    +        input
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "Bool whether padding is applied at the ends."
    +  name: "pad"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Strings to be encoded." name: "input" type: DT_STRING
    +}
    +output_arg {
    +  description: "Input strings encoded in base64."
    +  name: "output"
    +  type: DT_STRING
    +}
    +-}
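    +
    +-- A usage sketch, assuming the same hypothetical 'constant' helper as above
    +-- and OverloadedStrings for the ByteString literals:
    +--
    +-- > encoded :: Tensor Value Data.ByteString.ByteString
    +-- > encoded = encodeBase64 (constant (Shape [2]) ["hello", "world"])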
    +
    +-- | Split elements of `input` based on `delimiter` into a `SparseTensor`.
    +--
    +-- Let N be the size of the source (typically N will be the batch size). Split
    +-- each element of `input` based on `delimiter` and return a `SparseTensor`
    +-- containing the split tokens. Empty tokens are ignored.
    +-- 
    +-- `delimiter` can be empty or a single character. If `delimiter` is an empty
    +--  string, each element of `input` is split into individual 1 character strings.
    +-- 
    +-- For example:
    +--   N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
    +--   will be
    +-- 
    +--   indices = [0, 0;
    +--              0, 1;
    +--              1, 0;
    +--              1, 1;
    +--              1, 2]
    +--   shape = [2, 3]
    +--   values = ['hello', 'world', 'a', 'b', 'c']
    +stringSplit :: Tensor v1 Data.ByteString.ByteString -- ^ __input__: 1-D. Strings to split.
    +               -> Tensor v2 Data.ByteString.ByteString -- ^ __delimiter__: 0-D. Delimiter character, or empty string.
    +               -> (Tensor Value Data.Int.Int64,
    +                   Tensor Value Data.ByteString.ByteString,
    +                   Tensor Value Data.Int.Int64)
    +               -- ^ (__indices__, __values__, __shape__)
    +               --
    +               -- * __indices__: A dense matrix of int64 representing the indices of the sparse tensor.
    +               --
    +               -- * __values__: A vector of strings corresponding to the split values.
    +               --
    +               -- * __shape__: a length-2 vector of int64 representing the shape of the sparse
    +               -- tensor, where the first value is N and the second value is the maximum number
    +               -- of tokens in a single input entry.
    +stringSplit input delimiter | eqLengthGuard [] =
    +    buildOp (opDef "StringSplit")
    +        input delimiter
    +{-
    +input_arg {
    +  description: "1-D. Strings to split." name: "input" type: DT_STRING
    +}
    +input_arg {
    +  description: "0-D. Delimiter character, or empty string."
    +  name: "delimiter"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "A dense matrix of int64 representing the indices of the sparse tensor."
    +  name: "indices"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A vector of strings corresponding to the splited values."
    +  name: "values"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "a length-2 vector of int64 representing the shape of the sparse\ntensor, where the first value is N and the second value is the maximum number\nof tokens in a single input entry."
    +  name: "shape"
    +  type: DT_INT64
    +}
    +-}
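    +
    +-- A usage sketch mirroring the N = 2 example above (same assumed 'constant'
    +-- helper; OverloadedStrings for the literals):
    +--
    +-- > (indices, values, shape) =
    +-- >     stringSplit (constant (Shape [2]) ["hello world", "a b c"])
    +-- >                 (constant (Shape []) [" "])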
    +
    +-- | Joins the strings in the given list of string tensors into one tensor.
    +--
    +-- The strings are joined with the given separator (default is an empty separator).
    +stringJoin :: [Tensor v1 Data.ByteString.ByteString] -- ^ __inputs__: A list of string tensors.  The tensors must all have the same shape,
    +                                                     -- or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
    +                                                     -- of non-scalar inputs.
    +              -> Tensor Value Data.ByteString.ByteString -- ^ __output__
    +stringJoin inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
    +    buildOp (opDef "StringJoin"
    +             & opAttr "N" .~ (fromIntegral (length inputs) :: Int64))
    +        inputs
    +{-
    +attr { has_minimum: true minimum: 1 name: "N" type: "int" }
    +attr {
    +  default_value { s: "" }
    +  description: "string, an optional join separator."
    +  name: "separator"
    +  type: "string"
    +}
    +input_arg {
    +  description: "A list of string tensors.  The tensors must all have the same shape,\nor be scalars.  Scalars may be mixed in; these will be broadcast to the shape\nof non-scalar inputs."
    +  name: "inputs"
    +  number_attr: "N"
    +  type: DT_STRING
    +}
    +output_arg { name: "output" type: DT_STRING }
    +-}
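    +
    +-- A usage sketch joining two scalar string tensors (same assumed 'constant'
    +-- helper):
    +--
    +-- > joined = stringJoin [ constant (Shape []) ["ab"]
    +-- >                     , constant (Shape []) ["cd"] ]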
    +
    +-- | Converts each entry in the given tensor to strings.
    +--
    +-- Supports many numeric types and boolean.
    +asString :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Float),
    +                                                 Bool, Data.Int.Int32,
    +                                                 Data.Int.Int64, Data.Int.Int8,
    +                                                 Double, Float] t) =>
    +            Tensor v1 t -- ^ __input__
    +            -> Tensor Value Data.ByteString.ByteString -- ^ __output__
    +asString input | eqLengthGuard [] =
    +    buildOp (opDef "AsString"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_BOOL
    +      type: DT_INT8
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { i: -1 }
    +  description: "The post-decimal precision to use for floating point numbers.\nOnly used if precision > -1."
    +  name: "precision"
    +  type: "int"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "Use scientific notation for floating point numbers."
    +  name: "scientific"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "Use shortest representation (either scientific or standard) for\nfloating point numbers."
    +  name: "shortest"
    +  type: "bool"
    +}
    +attr {
    +  default_value { i: -1 }
    +  description: "Pad pre-decimal numbers to this width.\nApplies to both floating point and integer numbers.\nOnly used if width > -1."
    +  name: "width"
    +  type: "int"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "The value to pad if width > -1.  If empty, pads with spaces.\nAnother typical value is \'0\'.  String cannot be longer than 1 character."
    +  name: "fill"
    +  type: "string"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type: DT_STRING }
    +-}
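    +
    +-- A usage sketch converting an Int32 vector to decimal strings (same
    +-- assumed 'constant' helper):
    +--
    +-- > strs = asString (constant (Shape [3]) [1, 2, 3 :: Data.Int.Int32])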
    +
    +-- | Converts each string in the input Tensor to its hash modulo a number of buckets.
    +--
    +-- The hash function is deterministic on the content of the string within the
    +-- process. The hash function is a keyed hash function, where attribute `key`
    +-- defines the key of the hash function. `key` is an array of 2 elements.
    +-- 
    +-- A strong hash is important when inputs may be malicious, e.g. URLs with
    +-- additional components. Adversaries could try to make their inputs hash to the
    +-- same bucket for a denial-of-service attack or to skew the results. A strong
    +-- hash prevents this by making it difficult, if not infeasible, to compute inputs
    +-- that hash to the same bucket. This comes at a cost of roughly 4x higher compute
    +-- time than tf.string_to_hash_bucket_fast.
    +stringToHashBucketStrong :: Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
    +                            -> Tensor v1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket.
    +                            -> Tensor Value Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
    +stringToHashBucketStrong num_buckets input | eqLengthGuard [] =
    +    buildOp (opDef "StringToHashBucketStrong"
    +             & opAttr "num_buckets" .~ num_buckets)
    +        input
    +{-
    +attr {
    +  description: "The number of buckets."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_buckets"
    +  type: "int"
    +}
    +attr {
    +  description: "The key for the keyed hash function passed as a list of two uint64\nelements."
    +  name: "key"
    +  type: "list(int)"
    +}
    +input_arg {
    +  description: "The strings to assign a hash bucket."
    +  name: "input"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "A Tensor of the same shape as the input `string_tensor`."
    +  name: "output"
    +  type: DT_INT64
    +}
    +-}
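    +
    +-- A usage sketch hashing two strings into 1024 buckets; note that
    +-- num_buckets is a plain Haskell Int64, not a tensor (same assumed
    +-- 'constant' helper):
    +--
    +-- > buckets = stringToHashBucketStrong 1024
    +-- >               (constant (Shape [2]) ["foo", "bar"])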
    +
    +-- | Multiplies sparse updates into a variable reference.
    +--
    +-- This operation computes
    +-- 
    +--     # Scalar indices
    +--     ref[indices, ...] *= updates[...]
    +-- 
    +--     # Vector indices (for each i)
    +--     ref[indices[i], ...] *= updates[i, ...]
    +-- 
    +--     # High rank indices (for each i, ..., j)
    +--     ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
    +-- 
    +-- This operation outputs `ref` after the update is done.
    +-- This makes it easier to chain operations that need to use the reset value.
    +-- 
    +-- Duplicate entries are handled correctly: if multiple `indices` reference
    +-- the same location, their contributions multiply.
    +-- 
    +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
    +scatterMul :: forall v1 v2 v3 t tindices . (TensorType t,
    +                                            OneOf '[(Data.Complex.Complex Double),
    +                                                    (Data.Complex.Complex Float),
    +                                                    Data.Int.Int16,
    +                                                    Data.Int.Int32,
    +                                                    Data.Int.Int64,
    +                                                    Data.Int.Int8,
    +                                                    Data.Word.Word16,
    +                                                    Data.Word.Word8, Double,
    +                                                    Float] t,
    +                                            TensorType tindices,
    +                                            OneOf '[Data.Int.Int32,
    +                                                    Data.Int.Int64] tindices) =>
    +              Tensor v1 t -- ^ __ref__: Should be from a `Variable` node.
    +              -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
    +              -> Tensor v3 t -- ^ __updates__: A tensor of updated values to multiply to `ref`.
    +              -> Tensor Value t -- ^ __output_ref__: Same as `ref`.  Returned as a convenience for operations that want
    +              -- to use the updated values after the update is done.
    +scatterMul ref indices updates | eqLengthGuard [] =
    +    buildOp (opDef "ScatterMul"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        ref indices updates
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a `Variable` node."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A tensor of indices into the first dimension of `ref`."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "A tensor of updated values to multiply to `ref`."
    +  name: "updates"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
    +  is_ref: true
    +  name: "output_ref"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Joins a string Tensor across the given dimensions.
    +--
    +-- Computes the string join across dimensions in the given string Tensor of shape
    +-- `[d_0, d_1, ..., d_n-1]`.  Returns a new Tensor created by joining the input
    +-- strings with the given separator (default: empty string).  Negative indices are
    +-- counted backwards from the end, with `-1` being equivalent to `n - 1`.  Passing
    +-- an empty `reduction_indices` joins all strings in linear index order and outputs
    +-- a scalar string.
    +-- 
    +-- For example:
    +-- 
    +-- ```
    +-- # tensor `a` is [["a", "b"], ["c", "d"]]
    +-- tf.reduce_join(a, 0) ==> ["ac", "bd"]
    +-- tf.reduce_join(a, 1) ==> ["ab", "cd"]
    +-- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
    +-- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
    +-- tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
    +-- tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
    +-- tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
    +-- tf.reduce_join(a, [0, 1]) ==> ["acbd"]
    +-- tf.reduce_join(a, [1, 0]) ==> ["abcd"]
    +-- tf.reduce_join(a, []) ==> ["abcd"]
    +-- ```
    +reduceJoin :: Tensor v1 Data.ByteString.ByteString -- ^ __inputs__: The input to be joined.  All reduced indices must have non-zero size.
    +              -> Tensor v2 Data.Int.Int32 -- ^ __reduction_indices__: The dimensions to reduce over.  Dimensions are reduced in the
    +                                          -- order specified.  Omitting `reduction_indices` is equivalent to passing
    +                                          -- `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.
    +              -> Tensor Value Data.ByteString.ByteString -- ^ __output__: Has shape equal to that of the input with reduced dimensions removed or
    +              -- set to `1` depending on `keep_dims`.
    +reduceJoin inputs reduction_indices | eqLengthGuard [] =
    +    buildOp (opDef "ReduceJoin")
    +        inputs reduction_indices
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, retain reduced dimensions with length `1`."
    +  name: "keep_dims"
    +  type: "bool"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "The separator to use when joining."
    +  name: "separator"
    +  type: "string"
    +}
    +input_arg {
    +  description: "The input to be joined.  All reduced indices must have non-zero size."
    +  name: "inputs"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "The dimensions to reduce over.  Dimensions are reduced in the\norder specified.  Omitting `reduction_indices` is equivalent to passing\n`[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported."
    +  name: "reduction_indices"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "Has shape equal to that of the input with reduced dimensions removed or\nset to `1` depending on `keep_dims`."
    +  name: "output"
    +  type: DT_STRING
    +}
    +-}
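    +
    +-- A usage sketch reproducing tf.reduce_join(a, 0) ==> ["ac", "bd"] from the
    +-- example above (same assumed 'constant' helper):
    +--
    +-- > joined = reduceJoin (constant (Shape [2, 2]) ["a", "b", "c", "d"])
    +-- >                     (constant (Shape [1]) [0 :: Data.Int.Int32])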
    +
    +-- | Subtracts sparse updates to a variable reference.
    +--
    +--     # Scalar indices
    +--     ref[indices, ...] -= updates[...]
    +-- 
    +--     # Vector indices (for each i)
    +--     ref[indices[i], ...] -= updates[i, ...]
    +-- 
    +--     # High rank indices (for each i, ..., j)
    +--     ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
    +-- 
    +-- This operation outputs `ref` after the update is done.
    +-- This makes it easier to chain operations that need to use the reset value.
    +-- 
    +-- Duplicate entries are handled correctly: if multiple `indices` reference
    +-- the same location, their (negated) contributions add.
    +-- 
    +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/ScatterSub.png" alt>
    +-- </div>
    +scatterSub :: forall v1 v2 v3 t tindices . (TensorType t,
    +                                            OneOf '[(Data.Complex.Complex Double),
    +                                                    (Data.Complex.Complex Float),
    +                                                    Data.Int.Int16,
    +                                                    Data.Int.Int32,
    +                                                    Data.Int.Int64,
    +                                                    Data.Int.Int8,
    +                                                    Data.Word.Word16,
    +                                                    Data.Word.Word8, Double,
    +                                                    Float] t,
    +                                            TensorType tindices,
    +                                            OneOf '[Data.Int.Int32,
    +                                                    Data.Int.Int64] tindices) =>
    +              Tensor v1 t -- ^ __ref__: Should be from a `Variable` node.
    +              -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
    +              -> Tensor v3 t -- ^ __updates__: A tensor of updated values to subtract from `ref`.
    +              -> Tensor Value t -- ^ __output_ref__: Same as `ref`.  Returned as a convenience for operations that want
    +              -- to use the updated values after the update is done.
    +scatterSub ref indices updates | eqLengthGuard [] =
    +    buildOp (opDef "ScatterSub"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        ref indices updates
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a `Variable` node."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A tensor of indices into the first dimension of `ref`."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "A tensor of updated values to subtract from `ref`."
    +  name: "updates"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
    +  is_ref: true
    +  name: "output_ref"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Adds sparse updates to a variable reference.
    +--
    +-- This operation computes
    +-- 
    +--     # Scalar indices
    +--     ref[indices, ...] += updates[...]
    +-- 
    +--     # Vector indices (for each i)
    +--     ref[indices[i], ...] += updates[i, ...]
    +-- 
    +--     # High rank indices (for each i, ..., j)
    +--     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
    +-- 
    +-- This operation outputs `ref` after the update is done.
    +-- This makes it easier to chain operations that need to use the reset value.
    +-- 
    +-- Duplicate entries are handled correctly: if multiple `indices` reference
    +-- the same location, their contributions add.
    +-- 
    +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/ScatterAdd.png" alt>
    +-- </div>
    +scatterAdd :: forall v1 v2 v3 t tindices . (TensorType t,
    +                                            OneOf '[(Data.Complex.Complex Double),
    +                                                    (Data.Complex.Complex Float),
    +                                                    Data.Int.Int16,
    +                                                    Data.Int.Int32,
    +                                                    Data.Int.Int64,
    +                                                    Data.Int.Int8,
    +                                                    Data.Word.Word16,
    +                                                    Data.Word.Word8, Double,
    +                                                    Float] t,
    +                                            TensorType tindices,
    +                                            OneOf '[Data.Int.Int32,
    +                                                    Data.Int.Int64] tindices) =>
    +              Tensor v1 t -- ^ __ref__: Should be from a `Variable` node.
    +              -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
    +              -> Tensor v3 t -- ^ __updates__: A tensor of updated values to add to `ref`.
    +              -> Tensor Value t -- ^ __output_ref__: Same as `ref`.  Returned as a convenience for operations that want
    +              -- to use the updated values after the update is done.
    +scatterAdd ref indices updates | eqLengthGuard [] =
    +    buildOp (opDef "ScatterAdd"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        ref indices updates
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a `Variable` node."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A tensor of indices into the first dimension of `ref`."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "A tensor of updated values to add to `ref`."
    +  name: "updates"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
    +  is_ref: true
    +  name: "output_ref"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Applies sparse updates to a variable reference.
    +--
    +-- This operation computes
    +-- 
    +--     # Scalar indices
    +--     ref[indices, ...] = updates[...]
    +-- 
    +--     # Vector indices (for each i)
    +--     ref[indices[i], ...] = updates[i, ...]
    +-- 
    +--     # High rank indices (for each i, ..., j)
    +--     ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
    +-- 
    +-- This operation outputs `ref` after the update is done.
    +-- This makes it easier to chain operations that need to use the reset value.
    +-- 
    +-- If values in `ref` are to be updated more than once, because there are
    +-- duplicate entries in `indices`, the order in which the updates happen
    +-- for each value is undefined.
    +-- 
    +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/ScatterUpdate.png" alt>
    +-- </div>
    +scatterUpdate :: forall v1 v2 v3 t tindices . (TensorType t,
    +                                               TensorType tindices,
    +                                               OneOf '[Data.Int.Int32,
    +                                                       Data.Int.Int64] tindices) =>
    +                 Tensor v1 t -- ^ __ref__: Should be from a `Variable` node.
    +                 -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
    +                 -> Tensor v3 t -- ^ __updates__: A tensor of updated values to store in `ref`.
    +                 -> Tensor Value t -- ^ __output_ref__: Same as `ref`.  Returned as a convenience for operations that want
    +                 -- to use the updated values after the update is done.
    +scatterUpdate ref indices updates | eqLengthGuard [] =
    +    buildOp (opDef "ScatterUpdate"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        ref indices updates
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: true }
    +  description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a `Variable` node."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A tensor of indices into the first dimension of `ref`."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "A tensor of updated values to store in `ref`."
    +  name: "updates"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
    +  is_ref: true
    +  name: "output_ref"
    +  type_attr: "T"
    +}
    +-}
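    +
    +-- A usage sketch overwriting the first two rows of a variable; 'ref' is
    +-- assumed to come from a Variable-producing op (same assumed 'constant'
    +-- helper):
    +--
    +-- > overwriteRows ref =
    +-- >     scatterUpdate ref (constant (Shape [2]) [0, 1 :: Data.Int.Int32])
    +-- >                       (constant (Shape [2]) [10, 20 :: Float])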
    +
    +-- | Update 'ref' by subtracting 'value' from it.
    +--
    +-- This operation outputs "ref" after the update is done.
    +-- This makes it easier to chain operations that need to use the reset value.
    +assignSub :: forall v1 v2 t . (TensorType t,
    +                               OneOf '[(Data.Complex.Complex Double),
    +                                       (Data.Complex.Complex Float),
    +                                       Data.Int.Int16, Data.Int.Int32,
    +                                       Data.Int.Int64, Data.Int.Int8,
    +                                       Data.Word.Word16, Data.Word.Word8,
    +                                       Double, Float] t) =>
    +             Tensor v1 t -- ^ __ref__: Should be from a `Variable` node.
    +             -> Tensor v2 t -- ^ __value__: The value to be subtracted from the variable.
    +             -> Tensor Value t -- ^ __output_ref__: Same as "ref".  Returned as a convenience for operations that want
    +             -- to use the new value after the variable has been updated.
    +assignSub ref value | eqLengthGuard [] =
    +    buildOp (opDef "AssignSub"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        ref value
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a `Variable` node."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The value to be subtracted to the variable."
    +  name: "value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been updated."
    +  is_ref: true
    +  name: "output_ref"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Update 'ref' by adding 'value' to it.
    +--
    +-- This operation outputs "ref" after the update is done.
    +-- This makes it easier to chain operations that need to use the reset value.
    +assignAdd :: forall v1 v2 t . (TensorType t,
    +                               OneOf '[(Data.Complex.Complex Double),
    +                                       (Data.Complex.Complex Float),
    +                                       Data.Int.Int16, Data.Int.Int32,
    +                                       Data.Int.Int64, Data.Int.Int8,
    +                                       Data.Word.Word16, Data.Word.Word8,
    +                                       Double, Float] t) =>
    +             Tensor v1 t -- ^ __ref__: Should be from a `Variable` node.
    +             -> Tensor v2 t -- ^ __value__: The value to be added to the variable.
    +             -> Tensor Value t -- ^ __output_ref__: Same as "ref".  Returned as a convenience for operations that want
    +             -- to use the new value after the variable has been updated.
    +assignAdd ref value | eqLengthGuard [] =
    +    buildOp (opDef "AssignAdd"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        ref value
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a `Variable` node."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The value to be added to the variable."
    +  name: "value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been updated."
    +  is_ref: true
    +  name: "output_ref"
    +  type_attr: "T"
    +}
    +-}
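    +
    +-- A usage sketch incrementing a counter variable; 'counter' is assumed to
    +-- come from a Variable-producing op (same assumed 'constant' helper):
    +--
    +-- > increment counter = assignAdd counter (constant (Shape []) [1 :: Float])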
    +
    +-- | Computes gradients for SparseSegmentMean.
    +--
    +-- Returns tensor "output" with same shape as grad, except for dimension 0 whose
    +-- value is output_dim0.
    +sparseSegmentMeanGrad :: forall v1 v2 v3 v4 t tidx . (TensorType t,
    +                                                      OneOf '[Double, Float] t,
    +                                                      TensorType tidx,
    +                                                      OneOf '[Data.Int.Int32,
    +                                                              Data.Int.Int64] tidx) =>
    +                         Tensor v1 t -- ^ __grad__: gradient propagated to the SparseSegmentMean op.
    +                         -> Tensor v2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentMean op.
    +                         -> Tensor v3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentMean op.
    +                         -> Tensor v4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentMean op.
    +                         -> Tensor Value t -- ^ __output__
    +sparseSegmentMeanGrad grad indices segment_ids output_dim0 | eqLengthGuard [] =
    +    buildOp (opDef "SparseSegmentMeanGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        grad indices segment_ids output_dim0
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "gradient propagated to the SparseSegmentMean op."
    +  name: "grad"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "indices passed to the corresponding SparseSegmentMean op."
    +  name: "indices"
    +  type_attr: "Tidx"
    +}
    +input_arg {
    +  description: "segment_ids passed to the corresponding SparseSegmentMean op."
    +  name: "segment_ids"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "dimension 0 of \"data\" passed to SparseSegmentMean op."
    +  name: "output_dim0"
    +  type: DT_INT32
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Applies softmax to a batched N-D `SparseTensor`.
    +--
    +-- The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
    +-- (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
    +-- 
    +-- This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
    +-- logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
    +-- zero elements do not participate*.  Specifically, the algorithm is equivalent
    +-- to the following:
    +-- 
    +--   (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
    +--       with shape `[B, C]`, along the size-C dimension;
    +--   (2) Masks out the original implicitly-zero locations;
    +--   (3) Renormalizes the remaining elements.
    +-- 
    +-- Hence, the `SparseTensor` result has exactly the same non-zero indices and
    +-- shape.
    +sparseSoftmax :: forall v1 v2 v3 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                 Tensor v1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `NNZ x R` matrix with the indices of non-empty values in a
    +                                          -- SparseTensor, in canonical ordering.
    +                 -> Tensor v2 t -- ^ __sp_values__: 1-D.  `NNZ` non-empty values corresponding to `sp_indices`.
    +                 -> Tensor v3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
    +                 -> Tensor Value t -- ^ __output__: 1-D.  The `NNZ` values for the result `SparseTensor`.
    +sparseSoftmax sp_indices sp_values sp_shape | eqLengthGuard [] =
    +    buildOp (opDef "SparseSoftmax"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        sp_indices sp_values sp_shape
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D.  `NNZ x R` matrix with the indices of non-empty values in a\nSparseTensor, in canonical ordering."
    +  name: "sp_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `NNZ` non-empty values corresponding to `sp_indices`."
    +  name: "sp_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the input SparseTensor."
    +  name: "sp_shape"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "1-D.  The `NNZ` values for the result `SparseTensor`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Solves systems of linear equations.
    +--
    +-- `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
    +-- form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
    +-- a tensor of shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
    +-- satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
    +-- If `adjoint` is `True` then each output matrix satisfies
    +-- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
    +matrixSolve :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
    +               Tensor v1 t -- ^ __matrix__: Shape is `[..., M, M]`.
    +               -> Tensor v2 t -- ^ __rhs__: Shape is `[..., M, K]`.
    +               -> Tensor Value t -- ^ __output__: Shape is `[..., M, K]`.
    +matrixSolve matrix rhs | eqLengthGuard [] =
    +    buildOp (opDef "MatrixSolve"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        matrix rhs
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\nadjoint."
    +  name: "adjoint"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Shape is `[..., M, M]`."
    +  name: "matrix"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Shape is `[..., M, K]`." name: "rhs" type_attr: "T"
    +}
    +output_arg {
    +  description: "Shape is `[..., M, K]`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
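    +
    +-- A usage sketch solving the single 2x2 system [[2,0],[0,4]] x = [[2],[8]],
    +-- which yields x = [[1],[2]] (same assumed 'constant' helper):
    +--
    +-- > x = matrixSolve (constant (Shape [2, 2]) [2, 0, 0, 4 :: Double])
    +-- >                 (constant (Shape [2, 1]) [2, 8 :: Double])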
    +
    +-- | Computes the eigen decomposition of one or more square self-adjoint matrices.
    +--
    +-- Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
    +-- `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.
    +-- 
    +-- ```prettyprint
    +-- # a is a tensor.
    +-- # e is a tensor of eigenvalues.
    +-- # v is a tensor of eigenvectors.
    +-- e, v = self_adjoint_eig(a)
    +-- e = self_adjoint_eig(a, compute_v=False)
    +-- ```
    +selfAdjointEigV2 :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                    Tensor v1 t -- ^ __input__: `Tensor` input of shape `[N, N]`.
    +                    -> (Tensor Value t, Tensor Value t)
    +                    -- ^ (__e__, __v__)
    +                    --
    +                    -- * __e__: Eigenvalues. Shape is `[N]`.
    +                    --
    +                    -- * __v__: Eigenvectors. Shape is `[N, N]`.
    +selfAdjointEigV2 input | eqLengthGuard [] =
    +    buildOp (opDef "SelfAdjointEigV2"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  default_value { b: true }
    +  description: "If `True` then eigenvectors will be computed and returned in `v`.\nOtherwise, only the eigenvalues will be computed."
    +  name: "compute_v"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "`Tensor` input of shape `[N, N]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Eigenvalues. Shape is `[N]`."
    +  name: "e"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Eigenvectors. Shape is `[N, N]`."
    +  name: "v"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
    +--
    +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
    +-- form square matrices, with the same constraints as the single matrix
    +-- SelfAdjointEig.
    +-- 
    +-- The result is a [..., M+1, M] matrix with [..., 0,:] containing the
    +-- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.
    +selfAdjointEig :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                  Tensor v1 t -- ^ __input__: Shape is `[..., M, M]`.
    +                  -> Tensor Value t -- ^ __output__: Shape is `[..., M+1, M]`.
    +selfAdjointEig input | eqLengthGuard [] =
    +    buildOp (opDef "SelfAdjointEig"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
    +}
    +output_arg {
    +  description: "Shape is `[..., M+1, M]`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Update '*var' by subtracting 'alpha' * 'delta' from it.
    +
    +applyGradientDescent :: forall v1 v2 v3 t . (TensorType t,
    +                                             OneOf '[(Data.Complex.Complex Double),
    +                                                     (Data.Complex.Complex Float),
    +                                                     Data.Int.Int16,
    +                                                     Data.Int.Int32,
    +                                                     Data.Int.Int64,
    +                                                     Data.Int.Int8,
    +                                                     Data.Word.Word16,
    +                                                     Data.Word.Word8, Double,
    +                                                     Float] t) =>
    +                        Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                        -> Tensor v2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
    +                        -> Tensor v3 t -- ^ __delta__: The change.
    +                        -> Tensor Value t -- ^ __out__: Same as "var".
    +applyGradientDescent var alpha delta | eqLengthGuard [] =
    +    buildOp (opDef "ApplyGradientDescent"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        var alpha delta
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "alpha"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The change." name: "delta" type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
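    +
    +-- A usage sketch of one plain gradient-descent step, var := var - 0.01 * delta;
    +-- 'var' is assumed to come from a Variable-producing op (same assumed
    +-- 'constant' helper):
    +--
    +-- > sgdStep var delta =
    +-- >     applyGradientDescent var (constant (Shape []) [0.01 :: Float]) delta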
    +
    +-- | Push an element onto the stack.
    +
    +stackPush :: forall v1 v2 t . (TensorType t) =>
    +             Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a stack.
    +             -> Tensor v2 t -- ^ __elem__: The tensor to be pushed onto the stack.
    +             -> Tensor Value t -- ^ __output__: The same tensor as the input 'elem'.
    +stackPush handle elem | eqLengthGuard [] =
    +    buildOp (opDef "StackPush"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        handle elem
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  default_value { b: false }
    +  description: "Swap `elem` to CPU. Default to false."
    +  name: "swap_memory"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "The handle to a stack."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "The tensor to be pushed onto the stack."
    +  name: "elem"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The same tensor as the input \'elem\'."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the Cholesky decomposition of one or more square matrices.
    +--
    +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
    +-- form square matrices, with the same constraints as the single matrix Cholesky
    +-- decomposition above. The output is a tensor of the same shape as the input
    +-- containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
    +cholesky :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
    +            Tensor v1 t -- ^ __input__: Shape is `[..., M, M]`.
    +            -> Tensor Value t -- ^ __output__: Shape is `[..., M, M]`.
    +cholesky input | eqLengthGuard [] =
    +    buildOp (opDef "Cholesky"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
    +}
    +output_arg {
    +  description: "Shape is `[..., M, M]`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
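    +
    +-- A usage sketch factoring one positive-definite 2x2 matrix (same assumed
    +-- 'constant' helper):
    +--
    +-- > l = cholesky (constant (Shape [2, 2]) [4, 2, 2, 3 :: Double])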
    +
    +-- | Interleave the values from the `data` tensors into a single tensor.
    +--
    +-- Builds a merged tensor such that
    +-- 
    +--     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
    +-- 
    +-- For example, if each `indices[m]` is scalar or vector, we have
    +-- 
    +--     # Scalar indices
    +--     merged[indices[m], ...] = data[m][...]
    +-- 
    +--     # Vector indices
    +--     merged[indices[m][i], ...] = data[m][i, ...]
    +-- 
    +-- Each `data[i].shape` must start with the corresponding `indices[i].shape`,
    +-- and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
    +-- must have `data[i].shape = indices[i].shape + constant`.  In terms of this
    +-- `constant`, the output shape is
    +-- 
    +--     merged.shape = [max(indices)] + constant
    +-- 
    +-- Values are merged in order, so if an index appears in both `indices[m][i]` and
    +-- `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
    +-- merged result.
    +-- 
    +-- For example:
    +-- 
    +--     indices[0] = 6
    +--     indices[1] = [4, 1]
    +--     indices[2] = [[5, 2], [0, 3]]
    +--     data[0] = [61, 62]
    +--     data[1] = [[41, 42], [11, 12]]
    +--     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
    +--     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
    +--               [51, 52], [61, 62]]
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/DynamicStitch.png" alt>
    +-- </div>
    +dynamicStitch :: forall v1 v2 t . (TensorType t) =>
    +                 [Tensor v1 Data.Int.Int32] -- ^ __indices__
    +                 -> [Tensor v2 t] -- ^ __data__
    +                 -> Tensor Value t -- ^ __merged__
    +dynamicStitch indices data' | eqLengthGuard [("N", [("data'", length data'),
    +                                                    ("indices", length indices)])] =
    +    buildOp (opDef "DynamicStitch"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "N" .~ (fromIntegral (length data') :: Int64))
    +        indices data'
    +{-
    +attr { has_minimum: true minimum: 1 name: "N" type: "int" }
    +attr { name: "T" type: "type" }
    +input_arg { name: "indices" number_attr: "N" type: DT_INT32 }
    +input_arg { name: "data" number_attr: "N" type_attr: "T" }
    +output_arg { name: "merged" type_attr: "T" }
    +-}
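    +
    +-- A usage sketch stitching two single-element pieces into merged = [10, 20]
    +-- (same assumed 'constant' helper):
    +--
    +-- > merged = dynamicStitch
    +-- >     [ constant (Shape [1]) [0 :: Data.Int.Int32]
    +-- >     , constant (Shape [1]) [1] ]
    +-- >     [ constant (Shape [1]) [10 :: Float]
    +-- >     , constant (Shape [1]) [20] ]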
    +
    +-- | Returns the number of work units this Reader has finished processing.
    +
    +readerNumWorkUnitsCompleted :: Tensor v1 Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
    +                               -> Tensor Value Data.Int.Int64 -- ^ __units_completed__
    +readerNumWorkUnitsCompleted reader_handle | eqLengthGuard [] =
    +    buildOp (opDef "ReaderNumWorkUnitsCompleted")
    +        reader_handle
    +{-
    +input_arg {
    +  description: "Handle to a Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +output_arg { name: "units_completed" type: DT_INT64 }
    +-}
    +
    +-- | Returns the next record (key, value pair) produced by a Reader.
    +--
    +-- Will dequeue from the input queue if necessary (e.g. when the
    +-- Reader needs to start reading from a new file since it has finished
    +-- with the previous file).
    +readerRead :: Tensor v1 Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
    +              -> Tensor v2 Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a Queue, with string work items.
    +              -> (Tensor Value Data.ByteString.ByteString,
    +                  Tensor Value Data.ByteString.ByteString)
    +              -- ^ (__key__, __value__)
    +              --
    +              -- * __key__: A scalar.
    +              --
    +              -- * __value__: A scalar.
    +readerRead reader_handle queue_handle | eqLengthGuard [] =
    +    buildOp (opDef "ReaderRead")
    +        reader_handle queue_handle
    +{-
    +input_arg {
    +  description: "Handle to a Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Handle to a Queue, with string work items."
    +  is_ref: true
    +  name: "queue_handle"
    +  type: DT_STRING
    +}
    +output_arg { description: "A scalar." name: "key" type: DT_STRING }
    +output_arg {
    +  description: "A scalar." name: "value" type: DT_STRING
    +}
    +-}
    +
    +-- | Compute the 2-dimensional discrete Fourier Transform.
    +--
    +-- The transform is taken over the inner-most 2 dimensions of `input`.
    +fFT2D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
    +         -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2
    +         -- dimensions of `input` are replaced with their 2D Fourier Transform.
    +fFT2D input | eqLengthGuard [] =
    +    buildOp (opDef "FFT2D")
    +        input
    +{-
    +input_arg {
    +  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
    +}
    +output_arg {
    +  description: "A complex64 tensor of the same shape as `input`. The inner-most 2\ndimensions of `input` are replaced with their 2D Fourier Transform."
    +  name: "output"
    +  type: DT_COMPLEX64
    +}
    +-}
    +
    +-- | A Reader that outputs fixed-length records from a file.
    +
    +fixedLengthRecordReader :: Data.Int.Int64 -- ^ __record_bytes__
    +                           -> Tensor Value Data.ByteString.ByteString -- ^ __reader_handle__: The handle to reference the Reader.
    +fixedLengthRecordReader record_bytes | eqLengthGuard [] =
    +    buildOp (opDef "FixedLengthRecordReader"
    +             & opAttr "record_bytes" .~ record_bytes)
    +        
    +{-
    +attr { default_value { i: 0 } name: "header_bytes" type: "int" }
    +attr { name: "record_bytes" type: "int" }
    +attr { default_value { i: 0 } name: "footer_bytes" type: "int" }
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
    +  name: "shared_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "The handle to reference the Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | A placeholder op for a value that will be fed into the computation.
    +--
    +-- N.B. This operation will fail with an error if it is executed. It is
    +-- intended as a way to represent a value that will always be fed, and to
    +-- provide attrs that enable the fed value to be checked at runtime.
    +placeholder :: forall dtype . (TensorType dtype) =>
    +               Tensor Value dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism.
    +placeholder  | eqLengthGuard [] =
    +    buildOp (opDef "Placeholder"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        
    +{-
    +attr {
    +  description: "The type of elements in the tensor."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  default_value { shape { } }
    +  description: "(Optional) The shape of the tensor. If the shape has 0 dimensions, the\nshape is unconstrained."
    +  name: "shape"
    +  type: "shape"
    +}
    +output_arg {
    +  description: "A placeholder tensor that must be replaced using the feed mechanism."
    +  name: "output"
    +  type_attr: "dtype"
    +}
    +-}
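    +
    +-- A usage sketch: the element type is picked via the return type, so an
    +-- explicit annotation is usually needed.
    +--
    +-- > feedMe = placeholder :: Tensor Value Float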
    +
    +-- | Outputs a `Summary` protocol buffer with scalar values.
    +--
    +-- The input `tags` and `values` must have the same shape.  The generated summary
    +-- has a summary value for each tag-value pair in `tags` and `values`.
    +scalarSummary :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                         Data.Int.Int32,
    +                                                         Data.Int.Int64,
    +                                                         Data.Int.Int8,
    +                                                         Data.Word.Word16,
    +                                                         Data.Word.Word8,
    +                                                         Double, Float] t) =>
    +                 Tensor v1 Data.ByteString.ByteString -- ^ __tags__: Tags for the summary.
    +                 -> Tensor v2 t -- ^ __values__: Same shape as `tags`.  Values for the summary.
    +                 -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar.  Serialized `Summary` protocol buffer.
    +scalarSummary tags values | eqLengthGuard [] =
    +    buildOp (opDef "ScalarSummary"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        tags values
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Tags for the summary." name: "tags" type: DT_STRING
    +}
    +input_arg {
    +  description: "Same shape as `tags.  Values for the summary."
    +  name: "values"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Scalar.  Serialized `Summary` protocol buffer."
    +  name: "summary"
    +  type: DT_STRING
    +}
    +-}
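    +
    +-- Editorial usage sketch (not generated output): since `tags` and `values`
    +-- must have the same shape, a summary pairs each tag with one value.  The
    +-- placeholders below stand in for whatever tensors the caller actually
    +-- feeds.
    +--
    +-- > lossSummary :: Tensor Value Data.ByteString.ByteString
    +-- > lossSummary = scalarSummary tagsIn valuesIn
    +-- >   where tagsIn   = placeholder :: Tensor Value Data.ByteString.ByteString
    +-- >         valuesIn = placeholder :: Tensor Value Float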
    +
    +-- | Computes softmax activations.
    +--
    +-- For each batch `i` and class `j` we have
    +-- 
    +--     softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
    +softmax :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                                Float] t) =>
    +           Tensor v1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`.
    +           -> Tensor Value t -- ^ __softmax__: Same shape as `logits`.
    +softmax logits | eqLengthGuard [] =
    +    buildOp (opDef "Softmax"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        logits
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D with shape `[batch_size, num_classes]`."
    +  name: "logits"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Same shape as `logits`."
    +  name: "softmax"
    +  type_attr: "T"
    +}
    +-}
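    +
    +-- Editorial usage sketch (not generated output): for a row of equal logits
    +-- such as [1, 1], the formula above gives [0.5, 0.5], since each entry is
    +-- exp(1) / (exp(1) + exp(1)).
    +--
    +-- > probabilities :: Tensor Value Float
    +-- > probabilities = softmax (placeholder :: Tensor Value Float)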
    +
    +-- | Generate a sharded filename. The filename is printf formatted as
    +--
    +--    %s-%05d-of-%05d, basename, shard, num_shards.
    +shardedFilename :: Tensor v1 Data.ByteString.ByteString -- ^ __basename__
    +                   -> Tensor v2 Data.Int.Int32 -- ^ __shard__
    +                   -> Tensor v3 Data.Int.Int32 -- ^ __num_shards__
    +                   -> Tensor Value Data.ByteString.ByteString -- ^ __filename__
    +shardedFilename basename shard num_shards | eqLengthGuard [] =
    +    buildOp (opDef "ShardedFilename")
    +        basename shard num_shards
    +{-
    +input_arg { name: "basename" type: DT_STRING }
    +input_arg { name: "shard" type: DT_INT32 }
    +input_arg { name: "num_shards" type: DT_INT32 }
    +output_arg { name: "filename" type: DT_STRING }
    +-}
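    +
    +-- Editorial note (not generated output): with basename "train", shard 3 and
    +-- num_shards 100, the printf format above yields "train-00003-of-00100".
    +--
    +-- > name :: Tensor Value Data.ByteString.ByteString
    +-- > name = shardedFilename base shard total
    +-- >   where base  = placeholder :: Tensor Value Data.ByteString.ByteString
    +-- >         shard = placeholder :: Tensor Value Data.Int.Int32
    +-- >         total = placeholder :: Tensor Value Data.Int.Int32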
    +
    +-- | Sends the named tensor from send_device to recv_device.
    +--
    +-- _HostSend requires its input on host memory whereas _Send requires its
    +-- input on device memory.
    +_HostSend :: forall v1 t . (TensorType t) =>
    +             Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
    +             -> Tensor v1 t -- ^ __tensor__: The tensor to send.
    +             -> ControlNode
    +_HostSend send_device_incarnation tensor | eqLengthGuard [] =
    +    buildOp (opDef "_HostSend"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "send_device_incarnation" .~ send_device_incarnation)
    +        tensor
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "The name of the tensor to send."
    +  name: "tensor_name"
    +  type: "string"
    +}
    +attr {
    +  description: "The name of the device sending the tensor."
    +  name: "send_device"
    +  type: "string"
    +}
    +attr {
    +  description: "The current incarnation of send_device."
    +  name: "send_device_incarnation"
    +  type: "int"
    +}
    +attr {
    +  description: "The name of the device receiving the tensor."
    +  name: "recv_device"
    +  type: "string"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
    +  name: "client_terminated"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "The tensor to send." name: "tensor" type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the gradient of the sigmoid of `x` wrt its input.
    +--
    +-- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
    +-- `dy` is the corresponding input gradient.
    +sigmoidGrad :: forall v1 v2 t . (TensorType t,
    +                                 OneOf '[(Data.Complex.Complex Double),
    +                                         (Data.Complex.Complex Float),
    +                                         Data.Word.Word16, Double, Float] t) =>
    +               Tensor v1 t -- ^ __x__
    +               -> Tensor v2 t -- ^ __y__
    +               -> Tensor Value t -- ^ __z__
    +sigmoidGrad x y | eqLengthGuard [] =
    +    buildOp (opDef "SigmoidGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
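    +
    +-- Editorial sketch (not generated output): the gradient formula above,
    +-- written as a plain Haskell function on scalars for comparison.
    +--
    +-- > sigmoidGradRef :: Double -> Double -> Double
    +-- > sigmoidGradRef y dy = dy * y * (1 - y)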
    +
    +-- | Greedily selects a subset of bounding boxes in descending order of score,
    +--
    +-- pruning away boxes that have high intersection-over-union (IOU) overlap
    +-- with previously selected boxes.  Bounding boxes are supplied as
    +-- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
    +-- diagonal pair of box corners and the coordinates can be provided as normalized
    +-- (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
    +-- is agnostic to where the origin is in the coordinate system.  Note that this
    +-- algorithm is invariant to orthogonal transformations and translations
    +-- of the coordinate system; thus translations or reflections of the coordinate
    +-- system result in the same boxes being selected by the algorithm.
    +-- 
    +-- The output of this operation is a set of integers indexing into the input
    +-- collection of bounding boxes representing the selected boxes.  The bounding
    +-- box coordinates corresponding to the selected indices can then be obtained
    +-- using the tf.gather operation.  For example:
    +-- 
    +--   selected_indices = tf.image.non_max_suppression(
    +--       boxes, scores, max_output_size, iou_threshold)
    +--   selected_boxes = tf.gather(boxes, selected_indices)
    +nonMaxSuppression :: Tensor v1 Float -- ^ __boxes__: A 2-D float tensor of shape `[num_boxes, 4]`.
    +                     -> Tensor v2 Float -- ^ __scores__: A 1-D float tensor of shape `[num_boxes]` representing a single
    +                                        -- score corresponding to each box (each row of boxes).
    +                     -> Tensor v3 Data.Int.Int32 -- ^ __max_output_size__: A scalar integer tensor representing the maximum number of
    +                                                 -- boxes to be selected by non max suppression.
    +                     -> Tensor Value Data.Int.Int32 -- ^ __selected_indices__: A 1-D integer tensor of shape `[M]` representing the selected
    +                     -- indices from the boxes tensor, where `M <= max_output_size`.
    +nonMaxSuppression boxes scores max_output_size | eqLengthGuard [] =
    +    buildOp (opDef "NonMaxSuppression")
    +        boxes scores max_output_size
    +{-
    +attr {
    +  default_value { f: 0.5 }
    +  description: "A float representing the threshold for deciding whether boxes\noverlap too much with respect to IOU."
    +  name: "iou_threshold"
    +  type: "float"
    +}
    +input_arg {
    +  description: "A 2-D float tensor of shape `[num_boxes, 4]`."
    +  name: "boxes"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes)."
    +  name: "scores"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression."
    +  name: "max_output_size"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`."
    +  name: "selected_indices"
    +  type: DT_INT32
    +}
    +-}
    +
    +-- | A Reader that outputs the queued work as both the key and value.
    +--
    +-- To use, enqueue strings in a Queue.  ReaderRead will take the front
    +-- work string and output (work, work).
    +identityReader :: Tensor Value Data.ByteString.ByteString -- ^ __reader_handle__: The handle to reference the Reader.
    +identityReader  | eqLengthGuard [] =
    +    buildOp (opDef "IdentityReader")
    +        
    +{-
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
    +  name: "shared_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "The handle to reference the Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Extracts a glimpse from the input tensor.
    +--
    +-- Returns a set of windows called glimpses extracted at location
    +-- `offsets` from the input tensor. If the windows only partially
    +-- overlap the inputs, the non-overlapping areas will be filled with
    +-- random noise.
    +-- 
    +-- The result is a 4-D tensor of shape `[batch_size, glimpse_height,
    +-- glimpse_width, channels]`. The channels and batch dimensions are the
    +-- same as that of the input tensor. The height and width of the output
    +-- windows are specified in the `size` parameter.
    +-- 
    +-- The arguments `normalized` and `centered` control how the windows are built:
    +-- 
    +-- * If the coordinates are normalized but not centered, 0.0 and 1.0
    +--   correspond to the minimum and maximum of each height and width
    +--   dimension.
    +-- * If the coordinates are both normalized and centered, they range from
    +--   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
    +--   left corner, the lower right corner is located at (1.0, 1.0) and the
    +--   center is at (0, 0).
    +-- * If the coordinates are not normalized they are interpreted as
    +--   numbers of pixels.
    +extractGlimpse :: Tensor v1 Float -- ^ __input__: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
    +                  -> Tensor v2 Data.Int.Int32 -- ^ __size__: A 1-D tensor of 2 elements containing the size of the glimpses
    +                                              -- to extract.  The glimpse height must be specified first, followed
    +                                              -- by the glimpse width.
    +                  -> Tensor v3 Float -- ^ __offsets__: A 2-D float tensor of shape `[batch_size, 2]` containing
    +                                     -- the x, y locations of the center of each window.
    +                  -> Tensor Value Float -- ^ __glimpse__: A tensor representing the glimpses `[batch_size,
    +                  -- glimpse_height, glimpse_width, channels]`.
    +extractGlimpse input size offsets | eqLengthGuard [] =
    +    buildOp (opDef "ExtractGlimpse")
    +        input size offsets
    +{-
    +attr {
    +  default_value { b: true }
    +  description: "indicates if the offset coordinates are centered relative to\nthe image, in which case the (0, 0) offset is relative to the center\nof the input images. If false, the (0,0) offset corresponds to the\nupper left corner of the input images."
    +  name: "centered"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: true }
    +  description: "indicates if the offset coordinates are normalized."
    +  name: "normalized"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: true }
    +  description: "indicates if the noise should be generated using a\nuniform distribution or a gaussian distribution."
    +  name: "uniform_noise"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "A 4-D float tensor of shape `[batch_size, height, width, channels]`."
    +  name: "input"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A 1-D tensor of 2 elements containing the size of the glimpses\nto extract.  The glimpse height must be specified first, following\nby the glimpse width."
    +  name: "size"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "A 2-D integer tensor of shape `[batch_size, 2]` containing\nthe x, y locations of the center of each window."
    +  name: "offsets"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A tensor representing the glimpses `[batch_size,\nglimpse_height, glimpse_width, channels]`."
    +  name: "glimpse"
    +  type: DT_FLOAT
    +}
    +-}
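    +
    +-- Editorial note (not generated output): with the default attrs
    +-- (`centered = True`, `normalized = True`), an offset of (0, 0) selects a
    +-- window centered on the middle of the input image, while (-1, -1) and
    +-- (1, 1) center windows on its upper-left and lower-right corners.
    +--
    +-- > glimpses :: Tensor Value Float
    +-- > glimpses = extractGlimpse imgs winSize offsets
    +-- >   where imgs    = placeholder :: Tensor Value Float
    +-- >         winSize = placeholder :: Tensor Value Data.Int.Int32
    +-- >         offsets = placeholder :: Tensor Value Float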
    +
    +-- | Computes the gradients of 3-D convolution with respect to the input.
    +
    +conv3DBackpropInput :: forall v1 v2 v3 t . (TensorType t,
    +                                            OneOf '[(Data.Complex.Complex Double),
    +                                                    (Data.Complex.Complex Float),
    +                                                    Data.Int.Int16,
    +                                                    Data.Int.Int32,
    +                                                    Data.Int.Int64,
    +                                                    Data.Int.Int8,
    +                                                    Data.Word.Word16,
    +                                                    Data.Word.Word8, Double,
    +                                                    Float] t) =>
    +                       Tensor v1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
    +                       -> Tensor v2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
    +                                      -- `in_channels` must match between `input` and `filter`.
    +                       -> Tensor v3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
    +                                      -- out_channels]`.
    +                       -> Tensor Value t -- ^ __output__
    +conv3DBackpropInput input filter out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "Conv3DBackpropInput"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter out_backprop
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "Shape `[batch, depth, rows, cols, in_channels]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
    +  name: "filter"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Solves one or more linear least-squares problems.
    +--
    +-- `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
    +-- form matrices of size `[M, N]`. Rhs is a tensor of shape `[..., M, K]`.
    +-- The output is a tensor of shape `[..., N, K]` where each output matrix solves
    +-- each of the equations matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]
    +-- in the least squares sense.
    +-- 
    +-- Below we use the following notation for each matrix and its
    +-- right-hand sides in the batch:
    +-- 
    +-- `matrix`=\\(A \in \Re^{m \times n}\\),
    +-- `rhs`=\\(B  \in \Re^{m \times k}\\),
    +-- `output`=\\(X  \in \Re^{n \times k}\\),
    +-- `l2_regularizer`=\\(\lambda\\).
    +-- 
    +-- If `fast` is `True`, then the solution is computed by solving the normal
    +-- equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
    +-- \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares
    +-- problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 +
    +-- \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
    +-- \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
    +-- minimum-norm solution to the under-determined linear system, i.e.
    +-- \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to
    +-- \\(A Z = B\\). Notice that the fast path is only numerically stable when
    +-- \\(A\\) is numerically full rank and has a condition number
    +-- \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or \\(\lambda\\) is
    +-- sufficiently large.
    +-- 
    +-- If `fast` is `False` an algorithm based on the numerically robust complete
    +-- orthogonal decomposition is used. This computes the minimum-norm
    +-- least-squares solution, even when \\(A\\) is rank deficient. This path is
    +-- typically 6-7 times slower than the fast path. If `fast` is `False` then
    +-- `l2_regularizer` is ignored.
    +matrixSolveLs :: forall v1 v2 v3 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                 Tensor v1 t -- ^ __matrix__: Shape is `[..., M, N]`.
    +                 -> Tensor v2 t -- ^ __rhs__: Shape is `[..., M, K]`.
    +                 -> Tensor v3 Double -- ^ __l2_regularizer__: Scalar tensor.
    +                 -> Tensor Value t -- ^ __output__: Shape is `[..., N, K]`.
    +matrixSolveLs matrix rhs l2_regularizer | eqLengthGuard [] =
    +    buildOp (opDef "MatrixSolveLs"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        matrix rhs l2_regularizer
    +{-
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +attr { default_value { b: true } name: "fast" type: "bool" }
    +input_arg {
    +  description: "Shape is `[..., M, N]`."
    +  name: "matrix"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Shape is `[..., M, K]`." name: "rhs" type_attr: "T"
    +}
    +input_arg {
    +  description: "Scalar tensor."
    +  name: "l2_regularizer"
    +  type: DT_DOUBLE
    +}
    +output_arg {
    +  description: "Shape is `[..., N, K]`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
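    +
    +-- Editorial usage sketch (not generated output): with \\(\lambda = 0\\) and
    +-- a square, invertible \\(A\\), the fast-path normal equations above reduce
    +-- to \\(X = A^{-1} B\\).
    +--
    +-- > solution :: Tensor Value Float
    +-- > solution = matrixSolveLs m b l2reg
    +-- >   where m     = placeholder :: Tensor Value Float
    +-- >         b     = placeholder :: Tensor Value Float
    +-- >         l2reg = placeholder :: Tensor Value Double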
    +
    +-- | Converts one or more images from RGB to HSV.
    +--
    +-- Outputs a tensor of the same shape as the `images` tensor, containing the HSV
    +-- value of the pixels. The output is only well defined if the values in `images`
    +-- are in `[0,1]`.
    +-- 
    +-- `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
    +-- `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
    +-- corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
    +rGBToHSV :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
    +            Tensor v1 t -- ^ __images__: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
    +            -> Tensor Value t -- ^ __output__: `images` converted to HSV.
    +rGBToHSV images | eqLengthGuard [] =
    +    buildOp (opDef "RGBToHSV"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        images
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "1-D or higher rank. RGB data to convert. Last dimension must be size 3."
    +  name: "images"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "`images` converted to HSV."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
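    +
    +-- Editorial note (not generated output): as a concrete instance of the
    +-- channel layout above, a pure-red pixel [1, 0, 0] maps to HSV [0, 1, 1]
    +-- (hue 0, full saturation, full value).
    +--
    +-- > hsvImages :: Tensor Value Float
    +-- > hsvImages = rGBToHSV (placeholder :: Tensor Value Float)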
    +
    +-- | Decode the first frame of a GIF-encoded image to a uint8 tensor.
    +--
    +-- GIFs with frame or transparency compression are not supported;
    +-- convert an animated GIF from compressed to uncompressed with:
    +-- 
    +-- convert $src.gif -coalesce $dst.gif
    +decodeGif :: Tensor v1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The GIF-encoded image.
    +             -> Tensor Value Data.Word.Word8 -- ^ __image__: 4-D with shape `[num_frames, height, width, 3]`. RGB order
    +decodeGif contents | eqLengthGuard [] =
    +    buildOp (opDef "DecodeGif")
    +        contents
    +{-
    +input_arg {
    +  description: "0-D.  The GIF-encoded image."
    +  name: "contents"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "4-D with shape `[num_frames, height, width, 3]`. RGB order"
    +  name: "image"
    +  type: DT_UINT8
    +}
    +-}
    +
    +-- | Deprecated. Disallowed in GraphDef version >= 2.
    +
    +adjustContrast :: forall v1 v2 v3 v4 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                                Data.Int.Int32,
    +                                                                Data.Int.Int64,
    +                                                                Data.Int.Int8,
    +                                                                Data.Word.Word8,
    +                                                                Double,
    +                                                                Float] t) =>
    +                  Tensor v1 t -- ^ __images__
    +                  -> Tensor v2 Float -- ^ __contrast_factor__
    +                  -> Tensor v3 Float -- ^ __min_value__
    +                  -> Tensor v4 Float -- ^ __max_value__
    +                  -> Tensor Value Float -- ^ __output__
    +adjustContrast images contrast_factor min_value max_value | eqLengthGuard [] =
    +    buildOp (opDef "AdjustContrast"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        images contrast_factor min_value max_value
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "images" type_attr: "T" }
    +input_arg { name: "contrast_factor" type: DT_FLOAT }
    +input_arg { name: "min_value" type: DT_FLOAT }
    +input_arg { name: "max_value" type: DT_FLOAT }
    +output_arg { name: "output" type: DT_FLOAT }
    +-}
    +
    +-- | DepthToSpace for tensors of type T.
    +--
    +-- Rearranges data from depth into blocks of spatial data.
    +-- This is the reverse transformation of SpaceToDepth. More specifically,
    +-- this op outputs a copy of the input tensor where values from the `depth`
    +-- dimension are moved in spatial blocks to the `height` and `width` dimensions.
    +-- The attr `block_size` indicates the input block size and how the data is moved.
    +-- 
    +--   * Chunks of data of size `block_size * block_size` from depth are rearranged
    +--     into non-overlapping blocks of size `block_size x block_size`
    +--   * The width of the output tensor is `input_width * block_size`, whereas the
    +--     height is `input_height * block_size`.
    +--   * The depth of the input tensor must be divisible by
    +--     `block_size * block_size`.
    +-- 
    +-- That is, assuming the input is in the shape:
    +-- `[batch, height, width, depth]`,
    +-- the shape of the output will be:
    +-- `[batch, height*block_size, width*block_size, depth/(block_size*block_size)]`
    +-- 
    +-- This operation requires that the input tensor be of rank 4, and that
    +-- `block_size` be >= 2 and that `block_size * block_size` be a divisor of the
    +-- input depth.
    +-- 
    +-- This operation is useful for resizing the activations between convolutions
    +-- (but keeping all data), e.g. instead of pooling. It is also useful for training
    +-- purely convolutional models.
    +-- 
    +-- For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:
    +-- 
    +-- ```prettyprint
    +-- x = [[[[1, 2, 3, 4]]]]
    +-- 
    +-- ```
    +-- 
    +-- This operation will output a tensor of shape `[1, 2, 2, 1]`:
    +-- 
    +-- ```prettyprint
    +--    [[[[1], [2]],
    +--      [[3], [4]]]]
    +-- ```
    +-- 
    +-- Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`;
    +-- the corresponding output will have 2x2 elements and will have a depth of
    +-- 1 channel (1 = `4 / (block_size * block_size)`).
    +-- The output element shape is `[2, 2, 1]`.
    +-- 
    +-- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
    +-- 
    +-- ```prettyprint
    +-- x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
    +-- ```
    +-- 
    +-- This operation, for block size of 2, will return the following tensor of shape
    +-- `[1, 2, 2, 3]`
    +-- 
    +-- ```prettyprint
    +--    [[[[1, 2, 3], [4, 5, 6]],
    +--      [[7, 8, 9], [10, 11, 12]]]]
    +-- 
    +-- ```
    +-- 
    +-- Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
    +-- 
    +-- ```prettyprint
    +-- x =  [[[[1, 2, 3, 4],
    +--        [5, 6, 7, 8]],
    +--       [[9, 10, 11, 12],
    +--        [13, 14, 15, 16]]]]
    +-- ```
    +-- 
    +-- the operator will return the following tensor of shape `[1 4 4 1]`:
    +-- 
    +-- ```prettyprint
    +-- x = [[ [1],   [2],  [5],  [6]],
    +--      [ [3],   [4],  [7],  [8]],
    +--      [ [9],  [10], [13],  [14]],
    +--      [ [11], [12], [15],  [16]]]
    +-- 
    +-- ```
    +depthToSpace :: forall v1 t . (TensorType t) =>
    +                Data.Int.Int64 -- ^ __block_size__: The size of the spatial block, same as in Space2Depth.
    +                -> Tensor v1 t -- ^ __input__
    +                -> Tensor Value t -- ^ __output__
    +depthToSpace block_size input | eqLengthGuard [] =
    +    buildOp (opDef "DepthToSpace"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "block_size" .~ block_size)
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "The size of the spatial block, same as in Space2Depth."
    +  has_minimum: true
    +  minimum: 2
    +  name: "block_size"
    +  type: "int"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | 
    +
    +batchMatrixSolve :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                    Tensor v1 t -- ^ __matrix__
    +                    -> Tensor v2 t -- ^ __rhs__
    +                    -> Tensor Value t -- ^ __output__
    +batchMatrixSolve matrix rhs | eqLengthGuard [] =
    +    buildOp (opDef "BatchMatrixSolve"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        matrix rhs
    +{-
    +attr { default_value { b: false } name: "adjoint" type: "bool" }
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "matrix" type_attr: "T" }
    +input_arg { name: "rhs" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Computes the complementary error function of `x` element-wise.
    +
    +erfc :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                             Float] t) => Tensor v1 t -- ^ __x__
    +        -> Tensor Value t -- ^ __y__
    +erfc x | eqLengthGuard [] =
    +    buildOp (opDef "Erfc"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Computes the gradient of bilinear interpolation.
    +
    +resizeBilinearGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Word.Word16,
    +                                                              Double,
    +                                                              Float] t) =>
    +                      Tensor v1 Float -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`.
    +                      -> Tensor v2 t -- ^ __original_image__: 4-D with shape `[batch, orig_height, orig_width, channels]`,
    +                                     -- The image tensor that was resized.
    +                      -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`.
    +                      -- Gradients with respect to the input image. Input image must have been
    +                      -- float or double.
    +resizeBilinearGrad grads original_image | eqLengthGuard [] =
    +    buildOp (opDef "ResizeBilinearGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        grads original_image
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension."
    +  name: "align_corners"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "grads"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized."
    +  name: "original_image"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Output a fact about factorials.
    +
    +fact :: Tensor Value Data.ByteString.ByteString -- ^ __fact__
    +fact  | eqLengthGuard [] =
    +    buildOp (opDef "Fact")
    +        
    +{-
    +output_arg { name: "fact" type: DT_STRING }
    +-}
    +
    +-- | Delete the tensor specified by its handle in the session.
    +
    +deleteSessionTensor :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state.
    +                       -> ControlNode
    +deleteSessionTensor handle | eqLengthGuard [] =
    +    buildOp (opDef "DeleteSessionTensor")
    +        handle
    +{-
    +input_arg {
    +  description: "The handle for a tensor stored in the session state."
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Returns the truth value of x OR y element-wise.
    +--
    +-- *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +logicalOr :: Tensor v1 Bool -- ^ __x__
    +             -> Tensor v2 Bool -- ^ __y__
    +             -> Tensor Value Bool -- ^ __z__
    +logicalOr x y | eqLengthGuard [] =
    +    buildOp (opDef "LogicalOr")
    +        x y
    +{-
    +input_arg { name: "x" type: DT_BOOL }
    +input_arg { name: "y" type: DT_BOOL }
    +output_arg { name: "z" type: DT_BOOL }
    +-}
    +
    +-- | Get the value of the tensor specified by its handle.
    +
    +getSessionTensor :: forall v1 dtype . (TensorType dtype) =>
    +                    Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state.
    +                    -> Tensor Value dtype -- ^ __value__: The tensor for the given handle.
    +getSessionTensor handle | eqLengthGuard [] =
    +    buildOp (opDef "GetSessionTensor"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        handle
    +{-
    +attr {
    +  description: "The type of the output value."
    +  name: "dtype"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The handle for a tensor stored in the session state."
    +  name: "handle"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "The tensor for the given handle."
    +  name: "value"
    +  type_attr: "dtype"
    +}
    +-}
    +
    +-- | 
    +
    +batchMatrixInverse :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                      Tensor v1 t -- ^ __input__
    +                      -> Tensor Value t -- ^ __output__
    +batchMatrixInverse input | eqLengthGuard [] =
    +    buildOp (opDef "BatchMatrixInverse"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { default_value { b: false } name: "adjoint" type: "bool" }
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Generate a glob pattern matching all sharded file names.
    +
    +shardedFilespec :: Tensor v1 Data.ByteString.ByteString -- ^ __basename__
    +                   -> Tensor v2 Data.Int.Int32 -- ^ __num_shards__
    +                   -> Tensor Value Data.ByteString.ByteString -- ^ __filename__
    +shardedFilespec basename num_shards | eqLengthGuard [] =
    +    buildOp (opDef "ShardedFilespec")
    +        basename num_shards
    +{-
    +input_arg { name: "basename" type: DT_STRING }
    +input_arg { name: "num_shards" type: DT_INT32 }
    +output_arg { name: "filename" type: DT_STRING }
    +-}
    +
    +-- | Decode web-safe base64-encoded strings.
    +--
    +-- Input may or may not have padding at the end. See EncodeBase64 for padding.
    +-- Web-safe means that input must use - and _ instead of + and /.
    +decodeBase64 :: Tensor v1 Data.ByteString.ByteString -- ^ __input__: Base64 strings to decode.
    +                -> Tensor Value Data.ByteString.ByteString -- ^ __output__: Decoded strings.
    +decodeBase64 input | eqLengthGuard [] =
    +    buildOp (opDef "DecodeBase64")
    +        input
    +{-
    +input_arg {
    +  description: "Base64 strings to decode."
    +  name: "input"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "Decoded strings." name: "output" type: DT_STRING
    +}
    +-}
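    +
    +-- Editorial note (not generated output): web-safe base64 uses '-' and '_'
    +-- where standard base64 uses '+' and '/'; e.g. the unpadded input
    +-- "SGVsbG8" decodes to "Hello".
    +--
    +-- > decoded :: Tensor Value Data.ByteString.ByteString
    +-- > decoded = decodeBase64 (placeholder :: Tensor Value Data.ByteString.ByteString)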
    +
    +-- | Store the input tensor in the state of the current session.
    +
    +getSessionHandle :: forall v1 t . (TensorType t) =>
    +                    Tensor v1 t -- ^ __value__: The tensor to be stored.
    +                    -> Tensor Value Data.ByteString.ByteString -- ^ __handle__: The handle for the tensor stored in the session state.
    +getSessionHandle value | eqLengthGuard [] =
    +    buildOp (opDef "GetSessionHandle"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        value
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The tensor to be stored."
    +  name: "value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The handle for the tensor stored in the session state."
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
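    +
    +-- Editorial sketch (not generated output): `getSessionHandle` composes with
    +-- the `getSessionTensor` op defined earlier in this module into a
    +-- store-then-fetch round trip.  This only illustrates how the types line
    +-- up; in practice the handle is produced by actually running the
    +-- `getSessionHandle` step first.
    +--
    +-- > roundTrip :: Tensor Value Float
    +-- > roundTrip = getSessionTensor (getSessionHandle x)
    +-- >   where x = placeholder :: Tensor Value Float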
    +
    +-- | Table initializer that takes two tensors for keys and values respectively.
    +
    +initializeTable :: forall v1 v2 v3 tkey tval . (TensorType tkey,
    +                                                TensorType tval) =>
    +                   Tensor v1 Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized.
    +                   -> Tensor v2 tkey -- ^ __keys__: Keys of type Tkey.
    +                   -> Tensor v3 tval -- ^ __values__: Values of type Tval.
    +                   -> ControlNode
    +initializeTable table_handle keys values | eqLengthGuard [] =
    +    buildOp (opDef "InitializeTable"
    +             & opAttr "Tkey" .~ tensorType (undefined :: tkey)
    +             & opAttr "Tval" .~ tensorType (undefined :: tval))
    +        table_handle keys values
    +{-
    +attr { name: "Tkey" type: "type" }
    +attr { name: "Tval" type: "type" }
    +input_arg {
    +  description: "Handle to a table which will be initialized."
    +  is_ref: true
    +  name: "table_handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Keys of type Tkey." name: "keys" type_attr: "Tkey"
    +}
    +input_arg {
    +  description: "Values of type Tval."
    +  name: "values"
    +  type_attr: "Tval"
    +}
    +-}
    +
    +-- | Computes tan of x element-wise.
    +
    +tan :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                            (Data.Complex.Complex Float),
    +                                            Data.Int.Int32, Data.Int.Int64,
    +                                            Data.Word.Word16, Double,
    +                                            Float] t) => Tensor v1 t -- ^ __x__
    +       -> Tensor Value t -- ^ __y__
    +tan x | eqLengthGuard [] =
    +    buildOp (opDef "Tan"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Computes hyperbolic tangent of `x` element-wise.
    +
    +tanh :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                             (Data.Complex.Complex Float),
    +                                             Data.Word.Word16, Double,
    +                                             Float] t) => Tensor v1 t -- ^ __x__
    +        -> Tensor Value t -- ^ __y__
    +tanh x | eqLengthGuard [] =
    +    buildOp (opDef "Tanh"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Update '*var' according to the proximal adagrad scheme.
    +
    +applyAdagradDA :: forall v1 v2 v3 v4 v5 v6 v7 v8 t . (TensorType t,
    +                                                      OneOf '[(Data.Complex.Complex Double),
    +                                                              (Data.Complex.Complex Float),
    +                                                              Data.Int.Int16,
    +                                                              Data.Int.Int32,
    +                                                              Data.Int.Int64,
    +                                                              Data.Int.Int8,
    +                                                              Data.Word.Word16,
    +                                                              Data.Word.Word8,
    +                                                              Double,
    +                                                              Float] t) =>
    +                  Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                  -> Tensor v2 t -- ^ __gradient_accumulator__: Should be from a Variable().
    +                  -> Tensor v3 t -- ^ __gradient_squared_accumulator__: Should be from a Variable().
    +                  -> Tensor v4 t -- ^ __grad__: The gradient.
    +                  -> Tensor v5 t -- ^ __lr__: Scaling factor. Must be a scalar.
    +                  -> Tensor v6 t -- ^ __l1__: L1 regularization. Must be a scalar.
    +                  -> Tensor v7 t -- ^ __l2__: L2 regularization. Must be a scalar.
    +                  -> Tensor v8 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
    +                  -> Tensor Value t -- ^ __out__: Same as "var".
    +applyAdagradDA var gradient_accumulator gradient_squared_accumulator grad lr l1
    +               l2 global_step | eqLengthGuard [] =
    +    buildOp (opDef "ApplyAdagradDA"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        var gradient_accumulator gradient_squared_accumulator grad lr l1 l2
    +        global_step
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "gradient_accumulator"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "gradient_squared_accumulator"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L1 regularization. Must be a scalar."
    +  name: "l1"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L2 regularization. Must be a scalar."
    +  name: "l2"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Training step number. Must be a scalar."
    +  name: "global_step"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Converts each string in the input Tensor to its hash mod by a number of buckets.
    +--
    +-- The hash function is deterministic on the content of the string within the
    +-- process.
    +-- 
    +-- Note that the hash function may change from time to time.
    +-- This functionality will be deprecated and it's recommended to use
    +-- `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
    +stringToHashBucket :: Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
    +                      -> Tensor v1 Data.ByteString.ByteString -- ^ __string_tensor__
    +                      -> Tensor Value Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
    +stringToHashBucket num_buckets string_tensor | eqLengthGuard [] =
    +    buildOp (opDef "StringToHashBucket"
    +             & opAttr "num_buckets" .~ num_buckets)
    +        string_tensor
    +{-
    +attr {
    +  description: "The number of buckets."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_buckets"
    +  type: "int"
    +}
    +input_arg { name: "string_tensor" type: DT_STRING }
    +output_arg {
    +  description: "A Tensor of the same shape as the input `string_tensor`."
    +  name: "output"
    +  type: DT_INT64
    +}
    +-}
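    +
    +-- Editorial usage sketch (not generated output): hashing a fed-in string
    +-- tensor into 1000 buckets.  The first argument sets the `num_buckets`
    +-- attr.
    +--
    +-- > buckets :: Tensor Value Data.Int.Int64
    +-- > buckets = stringToHashBucket 1000 strs
    +-- >   where strs = placeholder :: Tensor Value Data.ByteString.ByteString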
    +
    +-- | Computes gradients for the exponential linear (Elu) operation.
    +
    +eluGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                   Data.Int.Int32,
    +                                                   Data.Int.Int64,
    +                                                   Data.Int.Int8,
    +                                                   Data.Word.Word16,
    +                                                   Data.Word.Word8, Double,
    +                                                   Float] t) =>
    +           Tensor v1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Elu operation.
    +           -> Tensor v2 t -- ^ __outputs__: The outputs of the corresponding Elu operation.
    +           -> Tensor Value t -- ^ __backprops__: The gradients: `gradients * (outputs + 1)` if outputs < 0,
    +           -- `gradients` otherwise.
    +eluGrad gradients outputs | eqLengthGuard [] =
    +    buildOp (opDef "EluGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        gradients outputs
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The backpropagated gradients to the corresponding Elu operation."
    +  name: "gradients"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The outputs of the corresponding Elu operation."
    +  name: "outputs"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The gradients: `gradients * (outputs + 1)` if outputs < 0,\n`gradients` otherwise."
    +  name: "backprops"
    +  type_attr: "T"
    +}
    +-}
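    +
    +-- Editorial sketch (not generated output): the gradient rule from the
    +-- output description, as a plain Haskell function on scalars.
    +--
    +-- > eluGradRef :: Double -> Double -> Double
    +-- > eluGradRef gradient output
    +-- >     | output < 0 = gradient * (output + 1)
    +-- >     | otherwise  = gradient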
    +
    +-- | Computes gradient of the FractionalAvgPool function.
    +--
    +-- Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
    +-- FractionalAvgPoolGrad; we just need to evenly back-propagate each element of
    +-- out_backprop to those indices that form the same pooling cell. Therefore, we
    +-- just need to know the shape of the original input tensor, instead of the
    +-- whole tensor.
    +fractionalAvgPoolGrad :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                                 OneOf '[Data.Int.Int32,
    +                                                         Data.Int.Int64, Double,
    +                                                         Float] t) =>
    +                         Tensor v1 Data.Int.Int64 -- ^ __orig_input_tensor_shape__: Original input tensor shape for `fractional_avg_pool`
    +                         -> Tensor v2 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`.  Gradients
    +                                        -- w.r.t. the output of `fractional_avg_pool`.
    +                         -> Tensor v3 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with
    +                                                     -- col_pooling_sequence.
    +                         -> Tensor v4 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with
    +                                                     -- row_pooling sequence.
    +                         -> Tensor Value t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `fractional_avg_pool`.
    +fractionalAvgPoolGrad orig_input_tensor_shape out_backprop row_pooling_sequence
    +                      col_pooling_sequence | eqLengthGuard [] =
    +    buildOp (opDef "FractionalAvgPoolGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        orig_input_tensor_shape out_backprop row_pooling_sequence
    +        col_pooling_sequence
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling."
    +  name: "overlapping"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Original input tensor shape for `fractional_avg_pool`"
    +  name: "orig_input_tensor_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`.  Gradients\nw.r.t. the output of `fractional_avg_pool`."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "row pooling sequence, form pooling region with\ncol_pooling_sequence."
    +  name: "row_pooling_sequence"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "column pooling sequence, form pooling region with\nrow_pooling sequence."
    +  name: "col_pooling_sequence"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "4-D.  Gradients w.r.t. the input of `fractional_avg_pool`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Solves systems of linear equations with upper or lower triangular matrices by
    +--
    +-- backsubstitution.
    +-- 
    +-- `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
    +-- square matrices. If `lower` is `True` then the strictly upper triangular part
    +-- of each inner-most matrix is assumed to be zero and not accessed.
    +-- If `lower` is `False` then the strictly lower triangular part of each inner-most
    +-- matrix is assumed to be zero and not accessed.
    +-- `rhs` is a tensor of shape `[..., M, K]`.
    +-- 
    +-- The output is a tensor of shape `[..., M, K]`. If `adjoint` is
    +-- `False` then the innermost matrices in `output` satisfy matrix equations
    +-- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
    +-- If `adjoint` is `True` then the innermost matrices in
    +-- `output` satisfy matrix equations
    +-- `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
    +matrixTriangularSolve :: forall v1 v2 t . (TensorType t, OneOf '[Double,
    +                                                                 Float] t) =>
    +                         Tensor v1 t -- ^ __matrix__: Shape is `[..., M, M]`.
    +                         -> Tensor v2 t -- ^ __rhs__: Shape is `[..., M, K]`.
    +                         -> Tensor Value t -- ^ __output__: Shape is `[..., M, K]`.
    +matrixTriangularSolve matrix rhs | eqLengthGuard [] =
    +    buildOp (opDef "MatrixTriangularSolve"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        matrix rhs
    +{-
    +attr {
    +  default_value { b: true }
    +  description: "Boolean indicating whether the innermost matrices in `matrix` are\nlower or upper triangular."
    +  name: "lower"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\nadjoint."
    +  name: "adjoint"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Shape is `[..., M, M]`."
    +  name: "matrix"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Shape is `[..., M, K]`." name: "rhs" type_attr: "T"
    +}
    +output_arg {
    +  description: "Shape is `[..., M, K]`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
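    +
    +-- Editorial usage sketch (not generated output): solving with the default
    +-- attrs (`lower = True`, `adjoint = False`), i.e. forward substitution on
    +-- lower-triangular matrices.
    +--
    +-- > solved :: Tensor Value Double
    +-- > solved = matrixTriangularSolve m b
    +-- >   where m = placeholder :: Tensor Value Double
    +-- >         b = placeholder :: Tensor Value Double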
    +
    +-- | Computes the (possibly normalized) Levenshtein Edit Distance.
    +--
    +-- The inputs are variable-length sequences provided by SparseTensors
    +--   (hypothesis_indices, hypothesis_values, hypothesis_shape)
    +-- and
    +--   (truth_indices, truth_values, truth_shape).
    +-- 
    +-- The inputs are:
    +editDistance :: forall v1 v2 v3 v4 v5 v6 t . (TensorType t) =>
    +                Tensor v1 Data.Int.Int64 -- ^ __hypothesis_indices__: The indices of the hypothesis list SparseTensor.
    +                                         -- This is an N x R int64 matrix.
    +                -> Tensor v2 t -- ^ __hypothesis_values__: The values of the hypothesis list SparseTensor.
    +                               -- This is an N-length vector.
    +                -> Tensor v3 Data.Int.Int64 -- ^ __hypothesis_shape__: The shape of the hypothesis list SparseTensor.
    +                                            -- This is an R-length vector.
    +                -> Tensor v4 Data.Int.Int64 -- ^ __truth_indices__: The indices of the truth list SparseTensor.
    +                                            -- This is an M x R int64 matrix.
    +                -> Tensor v5 t -- ^ __truth_values__: The values of the truth list SparseTensor.
    +                               -- This is an M-length vector.
    +                -> Tensor v6 Data.Int.Int64 -- ^ __truth_shape__: truth indices, vector.
    +                -> Tensor Value Float -- ^ __output__: A dense float tensor with rank R - 1.
    +                -- 
    +                -- For the example input:
    +                -- 
    +                --     // hypothesis represents a 2x1 matrix with variable-length values:
    +                --     //   (0,0) = ["a"]
    +                --     //   (1,0) = ["b"]
    +                --     hypothesis_indices = [[0, 0, 0],
    +                --                           [1, 0, 0]]
    +                --     hypothesis_values = ["a", "b"]
    +                --     hypothesis_shape = [2, 1, 1]
    +                -- 
    +                --     // truth represents a 2x2 matrix with variable-length values:
    +                --     //   (0,0) = []
    +                --     //   (0,1) = ["a"]
    +                --     //   (1,0) = ["b", "c"]
    +                --     //   (1,1) = ["a"]
    +                --     truth_indices = [[0, 1, 0],
    +                --                      [1, 0, 0],
    +                --                      [1, 0, 1],
    +                --                      [1, 1, 0]]
    +                --     truth_values = ["a", "b", "c", "a"]
    +                --     truth_shape = [2, 2, 2]
    +                --     normalize = true
    +                -- 
    +                -- The output will be:
    +                -- 
    +                --     // output is a 2x2 matrix with edit distances normalized by truth lengths.
    +                --     output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
    +                --               [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis
    +editDistance hypothesis_indices hypothesis_values hypothesis_shape truth_indices
    +             truth_values truth_shape | eqLengthGuard [] =
    +    buildOp (opDef "EditDistance"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        hypothesis_indices hypothesis_values hypothesis_shape truth_indices
    +        truth_values truth_shape
    +{-
    +attr {
    +  default_value { b: true }
    +  description: "boolean (if true, edit distances are normalized by length of truth).\n\nThe output is:"
    +  name: "normalize"
    +  type: "bool"
    +}
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The indices of the hypothesis list SparseTensor.\nThis is an N x R int64 matrix."
    +  name: "hypothesis_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "The values of the hypothesis list SparseTensor.\nThis is an N-length vector."
    +  name: "hypothesis_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The shape of the hypothesis list SparseTensor.\nThis is an R-length vector."
    +  name: "hypothesis_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "The indices of the truth list SparseTensor.\nThis is an M x R int64 matrix."
    +  name: "truth_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "The values of the truth list SparseTensor.\nThis is an M-length vector."
    +  name: "truth_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "truth indices, vector."
    +  name: "truth_shape"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A dense float tensor with rank R - 1.\n\nFor the example input:\n\n    // hypothesis represents a 2x1 matrix with variable-length values:\n    //   (0,0) = [\"a\"]\n    //   (1,0) = [\"b\"]\n    hypothesis_indices = [[0, 0, 0],\n                          [1, 0, 0]]\n    hypothesis_values = [\"a\", \"b\"]\n    hypothesis_shape = [2, 1, 1]\n\n    // truth represents a 2x2 matrix with variable-length values:\n    //   (0,0) = []\n    //   (0,1) = [\"a\"]\n    //   (1,0) = [\"b\", \"c\"]\n    //   (1,1) = [\"a\"]\n    truth_indices = [[0, 1, 0],\n                     [1, 0, 0],\n                     [1, 0, 1],\n                     [1, 1, 0]]\n    truth_values = [\"a\", \"b\", \"c\", \"a\"]\n    truth_shape = [2, 2, 2]\n    normalize = true\n\nThe output will be:\n\n    // output is a 2x2 matrix with edit distances normalized by truth lengths.\n    output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis\n              [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis"
    +  name: "output"
    +  type: DT_FLOAT
    +}
    +-}
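+
+-- Hand-written note (not generated): the normalized values in the example
+-- above can be checked against a plain Haskell reference implementation of
+-- the Levenshtein distance; `levenshtein` below is a local helper, not part
+-- of these bindings.
+--
+-- ```
+-- levenshtein :: Eq a => [a] -> [a] -> Int
+-- levenshtein s t = last (foldl transform [0 .. length s] t)
+--   where
+--     transform ns@(n : ns') c = scanl compute (n + 1) (zip3 s ns ns')
+--       where compute z (c', x, y) = minimum [y + 1, z + 1, x + fromEnum (c /= c')]
+--
+-- -- (1,0): levenshtein ["b"] ["b","c"] == 1, truth length 2  ==> 0.5
+-- -- (0,0): levenshtein ["a"] []       == 1, truth length 0  ==> inf
+-- ```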
    +
    +-- | Computes the number of incomplete elements in the given barrier.
    +
    +barrierIncompleteSize :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
    +                         -> Tensor Value Data.Int.Int32 -- ^ __size__: The number of incomplete elements (i.e. those with some of their value
    +                         -- components not set) in the barrier.
    +barrierIncompleteSize handle | eqLengthGuard [] =
    +    buildOp (opDef "BarrierIncompleteSize")
    +        handle
    +{-
    +input_arg {
    +  description: "The handle to a barrier."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "The number of incomplete elements (i.e. those with some of their value\ncomponents not set) in the barrier."
    +  name: "size"
    +  type: DT_INT32
    +}
    +-}
    +
    +-- | Generates labels for candidate sampling with a learned unigram distribution.
    +--
    +-- See explanations of candidate sampling and the data formats at
    +-- go/candidate-sampling.
    +-- 
    +-- For each batch, this op picks a single set of sampled candidate labels.
    +-- 
    +-- The advantages of sampling candidates per-batch are simplicity and the
    +-- possibility of efficient dense matrix multiplication. The disadvantage is that
    +-- the sampled candidates must be chosen independently of the context and of the
    +-- true labels.
    +threadUnsafeUnigramCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch.
    +                                       -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
    +                                       -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
    +                                       -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
    +                                               -- candidates in a batch are unique. This requires some approximation to
    +                                               -- estimate the post-rejection sampling probabilities.
    +                                       -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
    +                                                                   -- IDs of the num_true target_classes in the corresponding original label.
    +                                       -> (Tensor Value Data.Int.Int64,
    +                                           Tensor Value Float,
    +                                           Tensor Value Float)
    +                                       -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
    +                                       --
    +                                       -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
    +                                       -- the ID of a sampled candidate.
    +                                       --
    +                                       -- * __true_expected_count__: A batch_size * num_true matrix, representing
    +                                       -- the number of times each candidate is expected to occur in a batch
    +                                       -- of sampled candidates. If unique=true, then this is a probability.
    +                                       --
    +                                       -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
    +                                       -- candidate representing the number of times the candidate is expected
    +                                       -- to occur in a batch of sampled candidates.  If unique=true, then this is a
    +                                       -- probability.
    +threadUnsafeUnigramCandidateSampler num_sampled num_true range_max unique
    +                                    true_classes | eqLengthGuard [] =
    +    buildOp (opDef "ThreadUnsafeUnigramCandidateSampler"
    +             & opAttr "num_sampled" .~ num_sampled
    +             & opAttr "num_true" .~ num_true
    +             & opAttr "range_max" .~ range_max
    +             & opAttr "unique" .~ unique)
    +        true_classes
    +{-
    +attr {
    +  description: "Number of true labels per context."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_true"
    +  type: "int"
    +}
    +attr {
    +  description: "Number of candidates to randomly sample per batch."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_sampled"
    +  type: "int"
    +}
    +attr {
    +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
    +  name: "unique"
    +  type: "bool"
    +}
    +attr {
    +  description: "The sampler will sample integers from the interval [0, range_max)."
    +  has_minimum: true
    +  minimum: 1
    +  name: "range_max"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "An second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +input_arg {
    +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
    +  name: "true_classes"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
    +  name: "sampled_candidates"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
    +  name: "true_expected_count"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
    +  name: "sampled_expected_count"
    +  type: DT_FLOAT
    +}
    +-}
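+
+-- Hand-written usage sketch (not generated): draw 64 candidates per batch
+-- from the range [0, 10000) with rejection (unique = True), given one true
+-- label per example.  The `true_classes` tensor is assumed to be produced
+-- elsewhere in the graph; the numbers are illustrative only.
+--
+-- ```
+-- sampleBatch :: Tensor v1 Data.Int.Int64
+--             -> (Tensor Value Data.Int.Int64,
+--                 Tensor Value Float,
+--                 Tensor Value Float)
+-- sampleBatch = threadUnsafeUnigramCandidateSampler 64 1 10000 True
+-- ```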
    +
    +-- | Computes the number of complete elements in the given barrier.
    +
    +barrierReadySize :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
    +                    -> Tensor Value Data.Int.Int32 -- ^ __size__: The number of complete elements (i.e. those with all of their value
    +                    -- components set) in the barrier.
    +barrierReadySize handle | eqLengthGuard [] =
    +    buildOp (opDef "BarrierReadySize")
    +        handle
    +{-
    +input_arg {
    +  description: "The handle to a barrier."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "The number of complete elements (i.e. those with all of their value\ncomponents set) in the barrier."
    +  name: "size"
    +  type: DT_INT32
    +}
    +-}
    +
    +-- | Closes the given barrier.
    +--
    +-- This operation signals that no more new elements will be inserted in the
+-- given barrier. Subsequent InsertMany operations that try to introduce a new
+-- key will fail.
    +-- Subsequent InsertMany operations that just add missing components to already
    +-- existing elements will continue to succeed. Subsequent TakeMany operations will
    +-- continue to succeed if sufficient completed elements remain in the barrier.
    +-- Subsequent TakeMany operations that would block will fail immediately.
    +barrierClose :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
    +                -> ControlNode
    +barrierClose handle | eqLengthGuard [] =
    +    buildOp (opDef "BarrierClose")
    +        handle
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, all pending enqueue requests that are\nblocked on the barrier\'s queue will be cancelled. InsertMany will fail, even\nif no new key is introduced."
    +  name: "cancel_pending_enqueues"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "The handle to a barrier."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | A Reader that outputs the lines of a file delimited by '\n'.
    +
    +textLineReader :: Tensor Value Data.ByteString.ByteString -- ^ __reader_handle__: The handle to reference the Reader.
    +textLineReader  | eqLengthGuard [] =
    +    buildOp (opDef "TextLineReader")
    +        
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "Number of lines to skip from the beginning of every file."
    +  name: "skip_header_lines"
    +  type: "int"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
    +  name: "shared_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "The handle to reference the Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +-}
    +
+-- | Compute the 3-dimensional discrete Fourier Transform over the inner-most
+-- 3 dimensions of `input`.
    +fFT3D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
    +         -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3
    +         -- dimensions of `input` are replaced with their 3D Fourier Transform.
    +fFT3D input | eqLengthGuard [] =
    +    buildOp (opDef "FFT3D")
    +        input
    +{-
    +input_arg {
    +  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
    +}
    +output_arg {
    +  description: "A complex64 tensor of the same shape as `input`. The inner-most 3\ndimensions of `input` are replaced with their 3D Fourier Transform."
    +  name: "output"
    +  type: DT_COMPLEX64
    +}
    +-}
    +
    +-- | Exits the current frame to its parent frame.
    +--
    +-- Exit makes its input `data` available to the parent frame.
    +refExit :: forall v1 t . (TensorType t) =>
    +           Tensor v1 t -- ^ __data__: The tensor to be made available to the parent frame.
    +           -> Tensor Value t -- ^ __output__: The same tensor as `data`.
    +refExit data' | eqLengthGuard [] =
    +    buildOp (opDef "RefExit"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        data'
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The tensor to be made available to the parent frame."
    +  is_ref: true
    +  name: "data"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The same tensor as `data`."
    +  is_ref: true
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes exponential of x element-wise.  \\(y = e^x\\).
    +
    +exp :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                            (Data.Complex.Complex Float),
    +                                            Data.Word.Word16, Double,
    +                                            Float] t) => Tensor v1 t -- ^ __x__
    +       -> Tensor Value t -- ^ __y__
    +exp x | eqLengthGuard [] =
    +    buildOp (opDef "Exp"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Restores a tensor from checkpoint files.
    +--
    +-- This is like `Restore` except that restored tensor can be listed as filling
    +-- only a slice of a larger tensor.  `shape_and_slice` specifies the shape of the
    +-- larger tensor and the slice that the restored tensor covers.
    +-- 
    +-- The `shape_and_slice` input has the same format as the
    +-- elements of the `shapes_and_slices` input of the `SaveSlices` op.
    +restoreSlice :: forall v1 v2 v3 dt . (TensorType dt) =>
    +                Tensor v1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from
    +                                                     -- which we read the tensor.
    +                -> Tensor v2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be
    +                                                        -- restored.
    +                -> Tensor v3 Data.ByteString.ByteString -- ^ __shape_and_slice__: Scalar. The shapes and slice specifications to use when
+                                                        -- restoring a tensor.
    +                -> Tensor Value dt -- ^ __tensor__: The restored tensor.
    +restoreSlice file_pattern tensor_name shape_and_slice | eqLengthGuard [] =
    +    buildOp (opDef "RestoreSlice"
    +             & opAttr "dt" .~ tensorType (undefined :: dt))
    +        file_pattern tensor_name shape_and_slice
    +{-
    +attr {
    +  description: "The type of the tensor to be restored."
    +  name: "dt"
    +  type: "type"
    +}
    +attr {
    +  default_value { i: -1 }
    +  description: "Index of file to open first if multiple files match\n`file_pattern`. See the documentation for `Restore`."
    +  name: "preferred_shard"
    +  type: "int"
    +}
    +input_arg {
    +  description: "Must have a single element. The pattern of the files from\nwhich we read the tensor."
    +  name: "file_pattern"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Must have a single element. The name of the tensor to be\nrestored."
    +  name: "tensor_name"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Scalar. The shapes and slice specifications to use when\nrestoring a tensors."
    +  name: "shape_and_slice"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "The restored tensor." name: "tensor" type_attr: "dt"
    +}
    +-}
    +
    +-- | Returns the complex conjugate of a complex number.
    +--
    +-- Given a tensor `input` of complex numbers, this operation returns a tensor of
    +-- complex numbers that are the complex conjugate of each element in `input`. The
    +-- complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
    +-- real part and *b* is the imaginary part.
    +-- 
    +-- The complex conjugate returned by this operation is of the form \\(a - bj\\).
    +-- 
    +-- For example:
    +-- 
    +-- ```
    +-- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
    +-- tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
    +-- ```
    +conj :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                             (Data.Complex.Complex Float)] t) =>
    +        Tensor v1 t -- ^ __input__
    +        -> Tensor Value t -- ^ __output__
    +conj input | eqLengthGuard [] =
    +    buildOp (opDef "Conj"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
    +  default_value { type: DT_COMPLEX64 }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
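+
+-- Hand-written note (not generated): conjugation is an involution, so
+-- composing `conj` with itself gives back the original values:
+--
+-- ```
+-- conjTwice :: Tensor v1 (Data.Complex.Complex Float)
+--           -> Tensor Value (Data.Complex.Complex Float)
+-- conjTwice = conj . conj
+-- ```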
    +
    +-- | Computes the gradient of nearest neighbor interpolation.
    +
    +resizeNearestNeighborGrad :: forall v1 v2 t . (TensorType t,
    +                                               OneOf '[Data.Int.Int32,
    +                                                       Data.Int.Int8,
    +                                                       Data.Word.Word16,
    +                                                       Data.Word.Word8, Double,
    +                                                       Float] t) =>
    +                             Tensor v1 t -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`.
+                             -> Tensor v2 Data.Int.Int32 -- ^ __size__: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
    +                                                         -- original input size.
    +                             -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
    +                             -- with respect to the input image.
    +resizeNearestNeighborGrad grads size | eqLengthGuard [] =
    +    buildOp (opDef "ResizeNearestNeighborGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        grads size
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT32
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension."
    +  name: "align_corners"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "grads"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "= A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The\noriginal input size."
    +  name: "size"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients\nwith respect to the input image."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
+-- | Delete the TensorArray from its resource container.
+--
+-- This enables the user to close and release the resource in the middle of a
+-- step/run.
    +tensorArrayClose :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
    +                    -> ControlNode
    +tensorArrayClose handle | eqLengthGuard [] =
    +    buildOp (opDef "TensorArrayClose")
    +        handle
    +{-
    +input_arg {
    +  description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Computes atan of x element-wise.
    +
    +atan :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                             (Data.Complex.Complex Float),
    +                                             Data.Int.Int32, Data.Int.Int64,
    +                                             Data.Word.Word16, Double,
    +                                             Float] t) => Tensor v1 t -- ^ __x__
    +        -> Tensor Value t -- ^ __y__
    +atan x | eqLengthGuard [] =
    +    buildOp (opDef "Atan"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Get the current size of the TensorArray.
    +
    +tensorArraySize :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
    +                   -> Tensor v2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
    +                   -> Tensor Value Data.Int.Int32 -- ^ __size__: The current size of the TensorArray.
    +tensorArraySize handle flow_in | eqLengthGuard [] =
    +    buildOp (opDef "TensorArraySize")
    +        handle flow_in
    +{-
    +input_arg {
    +  description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_in"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "The current size of the TensorArray."
    +  name: "size"
    +  type: DT_INT32
    +}
    +-}
    +
+-- | Concatenate the elements of the TensorArray into the output `value`.
    +--
    +-- Takes `T` elements of shapes
    +-- 
    +--   ```
    +--   (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
    +--   ```
    +-- 
    +-- and concatenates them into a Tensor of shape:
    +-- 
    +--   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
    +-- 
    +-- All elements must have the same shape (excepting the first dimension).
    +tensorArrayConcat :: forall v1 v2 dtype . (TensorType dtype) =>
    +                     Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
    +                     -> Tensor v2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
    +                     -> (Tensor Value dtype, Tensor Value Data.Int.Int64)
    +                     -- ^ (__value__, __lengths__)
    +                     --
    +                     -- * __value__: All of the elements in the TensorArray, concatenated along the first
    +                     -- axis.
    +                     --
    +                     -- * __lengths__: A vector of the row sizes of the original T elements in the
    +                     -- value output.  In the example above, this would be the values:
    +                     -- `(n1, n2, ..., n(T-1))`.
    +tensorArrayConcat handle flow_in | eqLengthGuard [] =
    +    buildOp (opDef "TensorArrayConcat"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        handle flow_in
    +{-
    +attr {
    +  description: "The type of the elem that is returned."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  default_value { shape { unknown_rank: true } }
    +  description: "The expected shape of an element, if known,\nexcluding the first dimension. Used to validate the shapes of\nTensorArray elements. If this shape is not fully specified, concatenating\nzero-size TensorArrays is an error."
    +  name: "element_shape_except0"
    +  type: "shape"
    +}
    +input_arg {
    +  description: "The handle to a TensorArray."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_in"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "All of the elements in the TensorArray, concatenated along the first\naxis."
    +  name: "value"
    +  type_attr: "dtype"
    +}
    +output_arg {
    +  description: "A vector of the row sizes of the original T elements in the\nvalue output.  In the example above, this would be the values:\n`(n1, n2, ..., n(T-1))`."
    +  name: "lengths"
    +  type: DT_INT64
    +}
    +-}
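+
+-- Hand-written note (not generated): a concrete instance of the shape rule
+-- above.  Concatenating three elements of shapes (2 x 5), (3 x 5) and
+-- (1 x 5) yields a `value` of shape (6 x 5) and `lengths` = [2, 3, 1].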
    +
    +-- | Local Response Normalization.
    +--
    +-- The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
    +-- dimension), and each vector is normalized independently.  Within a given vector,
    +-- each component is divided by the weighted, squared sum of inputs within
    +-- `depth_radius`.  In detail,
    +-- 
    +--     sqr_sum[a, b, c, d] =
    +--         sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
    +--     output = input / (bias + alpha * sqr_sum) ** beta
    +-- 
    +-- For details, see [Krizhevsky et al., ImageNet classification with deep
    +-- convolutional neural networks (NIPS 2012)]
    +-- (http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
    +lRN :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Float] t) =>
    +       Tensor v1 t -- ^ __input__: 4-D.
    +       -> Tensor Value t -- ^ __output__
    +lRN input | eqLengthGuard [] =
    +    buildOp (opDef "LRN"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  default_value { i: 5 }
    +  description: "0-D.  Half-width of the 1-D normalization window."
    +  name: "depth_radius"
    +  type: "int"
    +}
    +attr {
    +  default_value { f: 1.0 }
    +  description: "An offset (usually positive to avoid dividing by 0)."
    +  name: "bias"
    +  type: "float"
    +}
    +attr {
    +  default_value { f: 1.0 }
    +  description: "A scale factor, usually positive."
    +  name: "alpha"
    +  type: "float"
    +}
    +attr {
    +  default_value { f: 0.5 }
    +  description: "An exponent."
    +  name: "beta"
    +  type: "float"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { description: "4-D." name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
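+
+-- Hand-written note (not generated): a hand-worked instance of the formula
+-- above, using the defaults bias = 1, alpha = 1, beta = 0.5 and assuming
+-- depth_radius = 1.  For a depth vector [1, 2, 3], the middle component
+-- d = 1 gets sqr_sum = 1^2 + 2^2 + 3^2 = 14, so
+-- output = 2 / (1 + 1 * 14) ** 0.5 ~= 0.516.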
    +
+-- | Converts each string in the input Tensor to its hash modulo a number of buckets.
    +--
    +-- The hash function is deterministic on the content of the string within the
    +-- process and will never change. However, it is not suitable for cryptography.
    +-- This function may be used when CPU time is scarce and inputs are trusted or
    +-- unimportant. There is a risk of adversaries constructing inputs that all hash
    +-- to the same bucket. To prevent this problem, use a strong hash function with
    +-- `tf.string_to_hash_bucket_strong`.
    +stringToHashBucketFast :: Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
    +                          -> Tensor v1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket.
    +                          -> Tensor Value Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
    +stringToHashBucketFast num_buckets input | eqLengthGuard [] =
    +    buildOp (opDef "StringToHashBucketFast"
    +             & opAttr "num_buckets" .~ num_buckets)
    +        input
    +{-
    +attr {
    +  description: "The number of buckets."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_buckets"
    +  type: "int"
    +}
    +input_arg {
    +  description: "The strings to assign a hash bucket."
    +  name: "input"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "A Tensor of the same shape as the input `string_tensor`."
    +  name: "output"
    +  type: DT_INT64
    +}
    +-}
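+
+-- Hand-written usage sketch (not generated): `num_buckets` is an op
+-- attribute, so it is supplied as a plain Haskell value rather than as a
+-- tensor input; 1000 is an arbitrary illustrative bucket count.
+--
+-- ```
+-- bucketOf :: Tensor v1 Data.ByteString.ByteString
+--          -> Tensor Value Data.Int.Int64
+-- bucketOf = stringToHashBucketFast 1000
+-- ```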
    +
    +-- | Pack the elements from the TensorArray into output `value`.
    +--
    +-- **WARNING: This op is deprecated.**
    +-- 
    +-- Instead of this op, use `TensorArrayGather` with
    +-- `indices = RangeOp(0, TensorArraySizeOp)`.
    +-- 
    +-- All elements must have the same shape.
    +tensorArrayPack :: forall v1 v2 dtype . (TensorType dtype) =>
    +                   Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
    +                   -> Tensor v2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
    +                   -> Tensor Value dtype -- ^ __value__: All of the elements in the TensorArray, concatenated along a new
    +                   -- axis (the new dimension 0).
    +tensorArrayPack handle flow_in | eqLengthGuard [] =
    +    buildOp (opDef "TensorArrayPack"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        handle flow_in
    +{-
    +attr {
    +  description: "The type of the elem that is returned."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  default_value { shape { unknown_rank: true } }
    +  description: "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, packing zero-size TensorArrays is an error."
    +  name: "element_shape"
    +  type: "shape"
    +}
    +input_arg {
    +  description: "The handle to a TensorArray."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_in"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "All of the elements in the TensorArray, concatenated along a new\naxis (the new dimension 0)."
    +  name: "value"
    +  type_attr: "dtype"
    +}
    +-}
    +
    +-- | Computes offsets of concat inputs within its output.
    +--
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 'x' is [2, 2, 7]
    +-- # 'y' is [2, 3, 7]
    +-- # 'z' is [2, 5, 7]
+-- concat_offset(1, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
    +-- ```
    +concatOffset :: Tensor v1 Data.Int.Int32 -- ^ __concat_dim__: The dimension along which to concatenate.
    +                -> [Tensor v2 Data.Int.Int32] -- ^ __shape__: The `N` int32 vectors representing shape of tensors being concatenated.
    +                -> [Tensor Value Data.Int.Int32] -- ^ __offset__: The `N` int32 vectors representing the starting offset
    +                --         of input tensors within the concatenated output.
    +                -- 
    +                -- This is typically used by gradient computations for a concat operation.
    +concatOffset concat_dim
    +             shape | eqLengthGuard [("N", [("shape", length shape)])] =
    +    buildOp (opDef "ConcatOffset"
    +             & opAttr "N" .~ (fromIntegral (length shape) :: Int64))
    +        concat_dim shape
    +{-
    +attr { has_minimum: true minimum: 2 name: "N" type: "int" }
    +input_arg {
    +  description: "The dimension along which to concatenate."
    +  name: "concat_dim"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "The `N` int32 vectors representing shape of tensors being concatenated."
    +  name: "shape"
    +  number_attr: "N"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "The `N` int32 vectors representing the starting offset\n        of input tensors within the concatenated output.\n\nThis is typically used by gradient computations for a concat operation."
    +  name: "offset"
    +  number_attr: "N"
    +  type: DT_INT32
    +}
    +-}
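+
+-- Hand-written note (not generated): each offset is the running sum of the
+-- preceding sizes along the concat dimension (here dimension 1):
+-- 0, then 0 + 2 = 2, then 2 + 3 = 5, giving [0, 0, 0], [0, 2, 0], [0, 5, 0].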
    +
    +-- | Creates or finds a child frame, and makes `data` available to the child frame.
    +--
    +-- The unique `frame_name` is used by the `Executor` to identify frames. If
    +-- `is_constant` is true, `output` is a constant in the child frame; otherwise
    +-- it may be changed in the child frame. At most `parallel_iterations` iterations
    +-- are run in parallel in the child frame.
    +refEnter :: forall v1 t . (TensorType t) =>
    +            Tensor v1 t -- ^ __data__: The tensor to be made available to the child frame.
    +            -> Tensor Value t -- ^ __output__: The same tensor as `data`.
    +refEnter data' | eqLengthGuard [] =
    +    buildOp (opDef "RefEnter"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        data'
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "The name of the child frame."
    +  name: "frame_name"
    +  type: "string"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, the output is constant within the child frame."
    +  name: "is_constant"
    +  type: "bool"
    +}
    +attr {
    +  default_value { i: 10 }
    +  description: "The number of iterations allowed to run in parallel."
    +  name: "parallel_iterations"
    +  type: "int"
    +}
    +input_arg {
    +  description: "The tensor to be made available to the child frame."
    +  is_ref: true
    +  name: "data"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The same tensor as `data`."
    +  is_ref: true
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes softsign: `features / (abs(features) + 1)`.
    +
    +softsign :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
    +                                                 Data.Int.Int64, Data.Int.Int8,
    +                                                 Data.Word.Word16,
    +                                                 Data.Word.Word8, Double,
    +                                                 Float] t) =>
    +            Tensor v1 t -- ^ __features__
    +            -> Tensor Value t -- ^ __activations__
    +softsign features | eqLengthGuard [] =
    +    buildOp (opDef "Softsign"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        features
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "features" type_attr: "T" }
    +output_arg { name: "activations" type_attr: "T" }
    +-}
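+
+-- Hand-written note (not generated): the same formula as a scalar reference
+-- in plain Haskell.
+--
+-- ```
+-- softsignRef :: Double -> Double
+-- softsignRef x = x / (abs x + 1)
+--
+-- -- softsignRef 3 == 0.75
+-- ```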
    +
    +-- | Push an element onto the tensor_array.
    +
    +tensorArrayWrite :: forall v1 v2 v3 v4 t . (TensorType t) =>
    +                    Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
    +                    -> Tensor v2 Data.Int.Int32 -- ^ __index__: The position to write to inside the TensorArray.
    +                    -> Tensor v3 t -- ^ __value__: The tensor to write to the TensorArray.
    +                    -> Tensor v4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
    +                    -> Tensor Value Float -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
    +tensorArrayWrite handle index value flow_in | eqLengthGuard [] =
    +    buildOp (opDef "TensorArrayWrite"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        handle index value flow_in
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The handle to a TensorArray."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "The position to write to inside the TensorArray."
    +  name: "index"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "The tensor to write to the TensorArray."
    +  name: "value"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_in"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_out"
    +  type: DT_FLOAT
    +}
    +-}
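+
+-- Hand-written usage sketch (not generated): the flow scalar returned by one
+-- write is threaded into the next, expressing the data dependency between
+-- consecutive TensorArray operations.
+--
+-- ```
+-- chainWrites handle i0 x0 i1 x1 flow0 =
+--     let flow1 = tensorArrayWrite handle i0 x0 flow0
+--     in  tensorArrayWrite handle i1 x1 flow1
+-- ```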
    +
+-- | Returns a diagonal tensor with the given diagonal values.
    +--
    +-- Given a `diagonal`, this operation returns a tensor with the `diagonal` and
    +-- everything else padded with zeros. The diagonal is computed as follows:
    +-- 
    +-- Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
    +-- rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
    +-- 
    +-- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 'diagonal' is [1, 2, 3, 4]
    +-- tf.diag(diagonal) ==> [[1, 0, 0, 0]
    +--                        [0, 2, 0, 0]
    +--                        [0, 0, 3, 0]
    +--                        [0, 0, 0, 4]]
    +-- ```
    +diag :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                             (Data.Complex.Complex Float),
    +                                             Data.Int.Int32, Data.Int.Int64,
    +                                             Double, Float] t) =>
    +        Tensor v1 t -- ^ __diagonal__: Rank k tensor where k is at most 3.
    +        -> Tensor Value t -- ^ __output__
    +diag diagonal | eqLengthGuard [] =
    +    buildOp (opDef "Diag"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        diagonal
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Rank k tensor where k is at most 3."
    +  name: "diagonal"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Returns the batched diagonal part of a batched tensor.
    +--
    +-- This operation returns a tensor with the `diagonal` part
    +-- of the batched `input`. The `diagonal` part is computed as follows:
    +-- 
    +-- Assume `input` has `k` dimensions `[I, J, K, ..., N, N]`, then the output is a
    +-- tensor of rank `k - 1` with dimensions `[I, J, K, ..., N]` where:
    +-- 
    +-- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
    +-- 
    +-- The input must be at least a matrix.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 'input' is [[[1, 0, 0, 0]
    +--                [0, 2, 0, 0]
    +--                [0, 0, 3, 0]
    +--                [0, 0, 0, 4]],
    +--               [[5, 0, 0, 0]
    +--                [0, 6, 0, 0]
    +--                [0, 0, 7, 0]
    +--                [0, 0, 0, 8]]]
    +-- 
    +-- and input.shape = (2, 4, 4)
    +-- 
    +-- tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
    +-- 
    +-- which has shape (2, 4)
    +-- ```
    +matrixDiagPart :: forall v1 t . (TensorType t) =>
    +                  Tensor v1 t -- ^ __input__: Rank `k` tensor where `k >= 2` and the last two dimensions are equal.
    +                  -> Tensor Value t -- ^ __diagonal__: The extracted diagonal(s) having shape
    +                  -- `diagonal.shape = input.shape[:-1]`.
    +matrixDiagPart input | eqLengthGuard [] =
    +    buildOp (opDef "MatrixDiagPart"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "Rank `k` tensor where `k >= 2` and the last two dimensions are equal."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The extracted diagonal(s) having shape\n`diagonal.shape = input.shape[:-1]`."
    +  name: "diagonal"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the number of elements in the given queue.
    +
    +queueSize :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
    +             -> Tensor Value Data.Int.Int32 -- ^ __size__: The number of elements in the given queue.
    +queueSize handle | eqLengthGuard [] =
    +    buildOp (opDef "QueueSize")
    +        handle
    +{-
    +input_arg {
    +  description: "The handle to a queue."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "The number of elements in the given queue."
    +  name: "size"
    +  type: DT_INT32
    +}
    +-}
    +
    +-- | Decode a PNG-encoded image to a uint8 or uint16 tensor.
    +--
    +-- The attr `channels` indicates the desired number of color channels for the
    +-- decoded image.
    +-- 
    +-- Accepted values are:
    +-- 
    +-- *   0: Use the number of channels in the PNG-encoded image.
+-- *   1: Output a grayscale image.
+-- *   3: Output an RGB image.
+-- *   4: Output an RGBA image.
    +-- 
    +-- If needed, the PNG-encoded image is transformed to match the requested number
    +-- of color channels.
    +decodePng :: forall v1 dtype . (TensorType dtype, OneOf '[Data.Word.Word16,
    +                                                          Data.Word.Word8] dtype) =>
    +             Tensor v1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The PNG-encoded image.
    +             -> Tensor Value dtype -- ^ __image__: 3-D with shape `[height, width, channels]`.
    +decodePng contents | eqLengthGuard [] =
    +    buildOp (opDef "DecodePng"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        contents
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "Number of color channels for the decoded image."
    +  name: "channels"
    +  type: "int"
    +}
    +attr {
    +  allowed_values { list { type: DT_UINT8 type: DT_UINT16 } }
    +  default_value { type: DT_UINT8 }
    +  name: "dtype"
    +  type: "type"
    +}
    +input_arg {
    +  description: "0-D.  The PNG-encoded image."
    +  name: "contents"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "3-D with shape `[height, width, channels]`."
    +  name: "image"
    +  type_attr: "dtype"
    +}
    +-}
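+
+-- Hand-written usage sketch (not generated): the `dtype` attribute is chosen
+-- by the result type, so decoding to 16-bit channels is just a matter of the
+-- annotation:
+--
+-- ```
+-- decodePng16 :: Tensor v1 Data.ByteString.ByteString
+--             -> Tensor Value Data.Word.Word16
+-- decodePng16 = decodePng
+-- ```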
    +
+-- | Returns the element-wise smallest integer not less than x.
    +
    +ceil :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                             Float] t) => Tensor v1 t -- ^ __x__
    +        -> Tensor Value t -- ^ __y__
    +ceil x | eqLengthGuard [] =
    +    buildOp (opDef "Ceil"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | A queue that produces elements sorted by the first component value.
    +--
    +-- Note that the PriorityQueue requires the first component of any element
    +-- to be a scalar int64, in addition to the other elements declared by
    +-- component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
    +-- and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
    +-- entry in their input (resp. output) lists.
    +priorityQueue :: Tensor Value Data.ByteString.ByteString -- ^ __handle__: The handle to the queue.
    +priorityQueue  | eqLengthGuard [] =
    +    buildOp (opDef "PriorityQueue")
    +        
    +{-
    +attr {
    +  default_value { list { } }
    +  description: "The type of each component in a value."
    +  has_minimum: true
    +  name: "component_types"
    +  type: "list(type)"
    +}
    +attr {
    +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
    +  has_minimum: true
    +  name: "shapes"
    +  type: "list(shape)"
    +}
    +attr {
    +  default_value { i: -1 }
    +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
    +  name: "capacity"
    +  type: "int"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
    +  name: "shared_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "The handle to the queue."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
    +
+-- | A placeholder op that passes through `input` when its output is not fed.
    +
    +placeholderWithDefault :: forall v1 dtype . (TensorType dtype) =>
    +                          Tensor v1 dtype -- ^ __input__: The default value to produce when `output` is not fed.
    +                          -> Tensor Value dtype -- ^ __output__: A placeholder tensor that defaults to `input` if it is not fed.
    +placeholderWithDefault input | eqLengthGuard [] =
    +    buildOp (opDef "PlaceholderWithDefault"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        input
    +{-
    +attr {
    +  description: "The type of elements in the tensor."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  description: "The (possibly partial) shape of the tensor."
    +  name: "shape"
    +  type: "shape"
    +}
    +input_arg {
    +  description: "The default value to produce when `output` is not fed."
    +  name: "input"
    +  type_attr: "dtype"
    +}
    +output_arg {
    +  description: "A placeholder tensor that defaults to `input` if it is not fed."
    +  name: "output"
    +  type_attr: "dtype"
    +}
    +-}
    +
    +-- | Computes the gradient of the crop_and_resize op wrt the input image tensor.
    +
    +cropAndResizeGradImage :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                                  OneOf '[Data.Word.Word16,
    +                                                          Double, Float] t) =>
    +                          Tensor v1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
    +                          -> Tensor v2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
    +                                             -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
    +                                             -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
    +                                             -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
    +                                             -- `[0, 1]` interval of normalized image height is mapped to
+                                             -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
    +                                             -- which case the sampled crop is an up-down flipped version of the original
    +                                             -- image. The width dimension is treated similarly. Normalized coordinates
    +                                             -- outside the `[0, 1]` range are allowed, in which case we use
    +                                             -- `extrapolation_value` to extrapolate the input image values.
    +                          -> Tensor v3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
    +                                                      -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
    +                          -> Tensor v4 Data.Int.Int32 -- ^ __image_size__: A 1-D tensor with value `[batch, image_height, image_width, depth]`
    +                                                      -- containing the original image size. Both `image_height` and `image_width` need
    +                                                      -- to be positive.
    +                          -> Tensor Value t -- ^ __output__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
    +cropAndResizeGradImage grads boxes box_ind image_size | eqLengthGuard [] =
    +    buildOp (opDef "CropAndResizeGradImage"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        grads boxes box_ind image_size
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { s: "bilinear" } }
    +  default_value { s: "bilinear" }
    +  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
    +  name: "method"
    +  type: "string"
    +}
    +input_arg {
    +  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
    +  name: "grads"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
    +  name: "boxes"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
    +  name: "box_ind"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "A 1-D tensor with value `[batch, image_height, image_width, depth]`\ncontaining the original image size. Both `image_height` and `image_width` need\nto be positive."
    +  name: "image_size"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Restore a Reader to its initial clean state.
    +
    +readerReset :: Tensor v1 Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
    +               -> ControlNode
    +readerReset reader_handle | eqLengthGuard [] =
    +    buildOp (opDef "ReaderReset")
    +        reader_handle
    +{-
    +input_arg {
    +  description: "Handle to a Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Extract `patches` from `images` and put them in the "depth" output dimension.
    +
    +extractImagePatches :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                            Data.Int.Int32,
    +                                                            Data.Int.Int64,
    +                                                            Data.Int.Int8,
    +                                                            Data.Word.Word16,
    +                                                            Data.Word.Word8,
    +                                                            Double, Float] t) =>
    +                       Tensor v1 t -- ^ __images__: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
    +                       -> Tensor Value t -- ^ __patches__: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
    +                       -- ksize_cols * depth]` containing image patches with size
    +                       -- `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension.
    +extractImagePatches images | eqLengthGuard [] =
    +    buildOp (opDef "ExtractImagePatches"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        images
    +{-
    +attr {
    +  description: "The size of the sliding window for each dimension of `images`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "ksizes"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "1-D of length 4. How far the centers of two consecutive patches are in\nthe images. Must be: `[1, stride_rows, stride_cols, 1]`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the\ninput stride, specifying how far two consecutive patch samples are in the\ninput. Equivalent to extracting patches with\n`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1), followed by\nsubsampling them spatially by a factor of `rates`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "rates"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use.\n\nWe specify the size-related attributes as:\n\n      ksizes = [1, ksize_rows, ksize_cols, 1]\n      strides = [1, strides_rows, strides_cols, 1]\n      rates = [1, rates_rows, rates_cols, 1]"
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "4-D Tensor with shape `[batch, in_rows, in_cols, depth]`."
    +  name: "images"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *\nksize_cols * depth]` containing image patches with size\n`ksize_rows x ksize_cols x depth` vectorized in the \"depth\" dimension."
    +  name: "patches"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | 
    +
    +batchMatrixSetDiag :: forall v1 v2 t . (TensorType t) =>
    +                      Tensor v1 t -- ^ __input__
    +                      -> Tensor v2 t -- ^ __diagonal__
    +                      -> Tensor Value t -- ^ __output__
    +batchMatrixSetDiag input diagonal | eqLengthGuard [] =
    +    buildOp (opDef "BatchMatrixSetDiag"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input diagonal
    +{-
    +attr { name: "T" type: "type" }
    +input_arg { name: "input" type_attr: "T" }
    +input_arg { name: "diagonal" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Delete the stack from its resource container.
    +
    +stackClose :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a stack.
    +              -> ControlNode
    +stackClose handle | eqLengthGuard [] =
    +    buildOp (opDef "StackClose")
    +        handle
    +{-
    +input_arg {
    +  description: "The handle to a stack."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Quantizes then dequantizes a tensor.
    +--
    +-- This op simulates the precision loss from the quantized forward pass by:
    +-- 1. Quantizing the tensor to fixed point numbers, which should match the target
    +--    quantization method when it is used in inference.
    +-- 2. Dequantizing it back to floating point numbers for the following ops, most
    +--    likely matmul.
    +-- 
    +-- There are different ways to quantize. This version does not use the full range
    +-- of the output type, choosing to elide the lowest possible value for symmetry
+-- (e.g., output range is -127 to 127, not -128 to 127 for signed 8-bit
    +-- quantization), so that 0.0 maps to 0.
    +-- 
    +-- To perform this op, we first find the range of values in our tensor. The range
    +-- we use is always centered on 0, so we find m such that
    +-- 
    +-- 1. m = max(abs(input_min), abs(input_max)) if range_given is true,
+-- 2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
    +-- 
    +-- Our input tensor range is then [-m, m].
    +-- 
    +-- Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
    +-- If signed_input is true, this is
    +-- 
+--   [min_fixed, max_fixed] =
+--       [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
    +-- 
    +-- Otherwise, if signed_input is false, the fixed-point range is
    +-- 
    +--   [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
    +-- 
    +-- From this we compute our scaling factor, s:
    +-- 
    +--   s = (max_fixed - min_fixed) / (2 * m).
    +-- 
    +-- Now we can quantize and dequantize the elements of our tensor.  An element e
    +-- is transformed into e':
    +-- 
    +--   e' = (e * s).round_to_nearest() / s.
    +-- 
    +-- Note that we have a different number of buckets in the signed vs. unsigned
    +-- cases.  For example, if num_bits == 8, we get 254 buckets in the signed case
    +-- vs. 255 in the unsigned case.
    +-- 
    +-- For example, suppose num_bits = 8 and m = 1.  Then
    +-- 
    +--   [min_fixed, max_fixed] = [-127, 127], and
    +--   s = (127 + 127) / 2 = 127.
    +-- 
    +-- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
    +-- {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
    +quantizeAndDequantize :: forall v1 t . (TensorType t, OneOf '[Double,
    +                                                              Float] t) =>
    +                         Tensor v1 t -- ^ __input__: Tensor to quantize and then dequantize.
    +                         -> Tensor Value t -- ^ __output__
    +quantizeAndDequantize input | eqLengthGuard [] =
    +    buildOp (opDef "QuantizeAndDequantize"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  default_value { b: true }
    +  description: "If the quantization is signed or unsigned."
    +  name: "signed_input"
    +  type: "bool"
    +}
    +attr {
    +  default_value { i: 8 }
    +  description: "The bitwidth of the quantization."
    +  name: "num_bits"
    +  type: "int"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If the range is given or should be computed from the tensor."
    +  name: "range_given"
    +  type: "bool"
    +}
    +attr {
    +  default_value { f: 0.0 }
    +  description: "If range is given, this is the min of the range."
    +  name: "input_min"
    +  type: "float"
    +}
    +attr {
    +  default_value { f: 0.0 }
    +  description: "If range is given, this is the max of the range."
    +  name: "input_max"
    +  type: "float"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Tensor to quantize and then dequantize."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
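+-- A pure-Haskell sketch of the formula above (illustration only, not part of
+-- the generated bindings): with num_bits = 8, signed input, and m = 1, the
+-- scale is s = 127 and each element e maps to round(e * s) / s.
+--
+-- > let s = 127 :: Double
+-- >     qdq e = fromIntegral (round (e * s) :: Int) / s
+-- > in map qdq [-1, 0, 0.3]  -- ~[-1.0, 0.0, 0.2992]
+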
    +-- | Returns which elements of x are NaN.
    +
    +isNan :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                              Float] t) =>
    +         Tensor v1 t -- ^ __x__
    +         -> Tensor Value Bool -- ^ __y__
    +isNan x | eqLengthGuard [] =
    +    buildOp (opDef "IsNan"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type: DT_BOOL }
    +-}
    +
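+-- A usage sketch (illustration only, not part of the generated bindings). It
+-- assumes `constant` from the companion tensorflow-ops package and
+-- `runSession`/`run` from TensorFlow.Session; the later sketches in this
+-- module reuse the same imports, each adding the relevant wrapper from this
+-- module.
+--
+-- > import Data.Int (Int32, Int64)  -- used by the later sketches
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.GenOps.Core as TF (isNan)
+-- > import qualified TensorFlow.Ops as TF (constant)
+-- > import qualified TensorFlow.Session as TF
+-- > import TensorFlow.Types (Shape(..))
+-- >
+-- > -- 0 / 0 :: Float is NaN, so the middle element should come back True.
+-- > checkNaN :: IO (V.Vector Bool)
+-- > checkNaN = TF.runSession $
+-- >     TF.run (TF.isNan (TF.constant (Shape [3]) [1, 0 / 0, 2 :: Float]))
+-- > -- expected: [False, True, False]
+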
    +-- | Returns locations of true values in a boolean tensor.
    +--
    +-- This operation returns the coordinates of true elements in `input`. The
    +-- coordinates are returned in a 2-D tensor where the first dimension (rows)
    +-- represents the number of true elements, and the second dimension (columns)
    +-- represents the coordinates of the true elements. Keep in mind, the shape of
    +-- the output tensor can vary depending on how many true values there are in
    +-- `input`. Indices are output in row-major order.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 'input' tensor is [[True, False]
    +-- #                    [True, False]]
    +-- # 'input' has two true values, so output has two coordinates.
    +-- # 'input' has rank of 2, so coordinates have two indices.
    +-- where(input) ==> [[0, 0],
    +--                   [1, 0]]
    +-- 
    +-- # `input` tensor is [[[True, False]
    +-- #                     [True, False]]
    +-- #                    [[False, True]
    +-- #                     [False, True]]
    +-- #                    [[False, False]
    +-- #                     [False, True]]]
    +-- # 'input' has 5 true values, so output has 5 coordinates.
    +-- # 'input' has rank of 3, so coordinates have three indices.
    +-- where(input) ==> [[0, 0, 0],
    +--                   [0, 1, 0],
    +--                   [1, 0, 1],
    +--                   [1, 1, 1],
    +--                   [2, 1, 1]]
    +-- ```
    +where' :: Tensor v1 Bool -- ^ __input__
    +          -> Tensor Value Data.Int.Int64 -- ^ __index__
    +where' input | eqLengthGuard [] =
    +    buildOp (opDef "Where")
    +        input
    +{-
    +input_arg { name: "input" type: DT_BOOL }
    +output_arg { name: "index" type: DT_INT64 }
    +-}
    +
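+-- A sketch of the first example above (illustration only; same assumptions
+-- and imports as the `isNan` sketch):
+--
+-- > -- The result is 2-D with shape [2, 2]; `run` fetches it row-major.
+-- > trueLocations :: IO (V.Vector Int64)
+-- > trueLocations = TF.runSession $ TF.run $
+-- >     TF.where' (TF.constant (Shape [2, 2]) [True, False, True, False])
+-- > -- expected: [0, 0, 1, 0], i.e. coordinates [[0, 0], [1, 0]]
+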
    +-- | Computes the difference between two lists of numbers or strings.
    +--
    +-- Given a list `x` and a list `y`, this operation returns a list `out` that
    +-- represents all values that are in `x` but not in `y`. The returned list `out`
    +-- is sorted in the same order that the numbers appear in `x` (duplicates are
    +-- preserved). This operation also returns a list `idx` that represents the
    +-- position of each `out` element in `x`. In other words:
    +-- 
    +-- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
    +-- 
    +-- For example, given this input:
    +-- 
    +-- ```prettyprint
    +-- x = [1, 2, 3, 4, 5, 6]
    +-- y = [1, 3, 5]
    +-- ```
    +-- 
    +-- This operation would return:
    +-- 
    +-- ```prettyprint
    +-- out ==> [2, 4, 6]
    +-- idx ==> [1, 3, 5]
    +-- ```
    +listDiff :: forall v1 v2 t out_idx . (TensorType t, TensorType out_idx,
    +                                      OneOf '[Data.Int.Int32,
    +                                              Data.Int.Int64] out_idx) =>
    +            Tensor v1 t -- ^ __x__: 1-D. Values to keep.
    +            -> Tensor v2 t -- ^ __y__: 1-D. Values to remove.
    +            -> (Tensor Value t, Tensor Value out_idx)
    +            -- ^ (__out__, __idx__)
    +            --
    +            -- * __out__: 1-D. Values present in `x` but not in `y`.
    +            --
    +            -- * __idx__: 1-D. Positions of `x` values preserved in `out`.
    +listDiff x y | eqLengthGuard [] =
    +    buildOp (opDef "ListDiff"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "out_idx" .~ tensorType (undefined :: out_idx))
    +        x y
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "out_idx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "1-D. Values to keep." name: "x" type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D. Values to remove." name: "y" type_attr: "T"
    +}
    +output_arg {
    +  description: "1-D. Values present in `x` but not in `y`."
    +  name: "out"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1-D. Positions of `x` values preserved in `out`."
    +  name: "idx"
    +  type_attr: "out_idx"
    +}
    +-}
    +
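+-- A sketch mirroring the example above (illustration only; same assumptions
+-- as the earlier sketches, and fetching a pair of tensors as a pair of
+-- vectors is also assumed to be supported by `run`):
+--
+-- > diffExample :: IO (V.Vector Int32, V.Vector Int32)
+-- > diffExample = TF.runSession $ do
+-- >     let x = TF.constant (Shape [6]) [1, 2, 3, 4, 5, 6 :: Int32]
+-- >         y = TF.constant (Shape [3]) [1, 3, 5 :: Int32]
+-- >     TF.run (TF.listDiff x y)
+-- > -- expected: ([2, 4, 6], [1, 3, 5])
+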
    +-- | Return a strided slice from `input`.
    +--
    +-- The output tensor is a tensor with dimensions implied by `begin`,
+-- `end`, and `strides`, whose values are extracted from `input`.
    +-- 
    +-- Specifically, the result tensor at index `(i[0], i[1], ..., i[n-1])`
    +-- will obtain the value `input[begin[0] + i[0] * stride[0], ..., `
+--                             `begin[n-1] + i[n-1] * stride[n-1]]`.
    +-- 
    +-- *Requirements*:
    +--   `0 != strides[i] for i in [0, n)`
    +stridedSlice :: forall v1 v2 v3 v4 index t . (TensorType index,
    +                                              OneOf '[Data.Int.Int32,
    +                                                      Data.Int.Int64] index,
    +                                              TensorType t) =>
    +                Tensor v1 t -- ^ __input__
    +                -> Tensor v2 index -- ^ __begin__: `begin[i]` specifies the offset into the `i`th dimension of
    +                                   -- `input` to slice from.
    +                -> Tensor v3 index -- ^ __end__: `end[i]` specifies the first offset into the `i`th dimension of
+                                   -- `input` that will not be extracted. Out of range values are
+                                   -- clamped to `[0, dim[i])` if `slice[i] > 0` or `[-1, dim[i]-1]`
+                                   -- if `slice[i] < 0`.
    +                -> Tensor v4 index -- ^ __strides__: `strides[i]` specifies the increment in the `i`th dimension
    +                                   -- after extracting a given element. Negative indices will reverse
+                                   -- the original order. Out of range values are
+                                   -- clamped to `[0, dim[i])` if `slice[i] > 0` or `[-1, dim[i]-1]` if `slice[i] < 0`.
    +                -> Tensor Value t -- ^ __output__
    +stridedSlice input begin end strides | eqLengthGuard [] =
    +    buildOp (opDef "StridedSlice"
    +             & opAttr "Index" .~ tensorType (undefined :: index)
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input begin end strides
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Index"
    +  type: "type"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "a bitmask where a bit i being 1 means to ignore the begin\nvalue and instead use the largest interval possible. At runtime\nbegin[i] will be replaced with `[0, n-1) if `stride[i] > 0` or\n`[-1, n-1]` if `stride[i] < 0`"
    +  name: "begin_mask"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "analogous to `begin_mask`"
    +  name: "end_mask"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "a bitmask where bit `i` being 1 means the `i`th\nposition is actually an ellipsis. One bit at most can be 1."
    +  name: "ellipsis_mask"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "a bitmask where bit `i` being 1 means the `i`th\nposition creates a dimension in the tensor of length 1. Thus\nthe total number of elements remain unchanged but the shape\ngets a 1 in the appropriate position."
    +  name: "new_axis_mask"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "a bitmask where bit `i` implies that the `i`th\nposition should shrink the dimensionality. begin and end\nmust imply a slice of size 1 in the dimension. For example in\npython one might do `foo[:,3,:]` which would result in\n`shrink_axis_mask` being 2."
    +  name: "shrink_axis_mask"
    +  type: "int"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +input_arg {
    +  description: "`begin[i]` specifies the offset into the `i`th dimension of\n`input` to slice from."
    +  name: "begin"
    +  type_attr: "Index"
    +}
    +input_arg {
    +  description: "`end[i]` specifies the first offset into the `i`th dimension of\n`input` that will not be extracted. Out or range values are\nclamped to `[0,dim[i]) if slice[i] > 0` or `[-1,dim[i]-1]`\n`if slice[i] < 0`"
    +  name: "end"
    +  type_attr: "Index"
    +}
    +input_arg {
    +  description: "`strides[i]` specifies the increment in the `i`th dimension\nafter extracting a given element. Negative indices will reverse\nthe original order. Out or range values are\nclamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`"
    +  name: "strides"
    +  type_attr: "Index"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
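+-- A usage sketch (illustration only; same assumptions as the earlier
+-- sketches): take every other element of a length-6 vector, the equivalent
+-- of Python's `t[0:6:2]`.
+--
+-- > everyOther :: IO (V.Vector Float)
+-- > everyOther = TF.runSession $ do
+-- >     let t       = TF.constant (Shape [6]) [10, 20, 30, 40, 50, 60 :: Float]
+-- >         begin   = TF.constant (Shape [1]) [0 :: Int32]
+-- >         end     = TF.constant (Shape [1]) [6 :: Int32]
+-- >         strides = TF.constant (Shape [1]) [2 :: Int32]
+-- >     TF.run (TF.stridedSlice t begin end strides)
+-- > -- expected: [10, 30, 50]
+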
    +-- | A queue that randomizes the order of elements.
    +
    +randomShuffleQueue :: Tensor Value Data.ByteString.ByteString -- ^ __handle__: The handle to the queue.
    +randomShuffleQueue  | eqLengthGuard [] =
    +    buildOp (opDef "RandomShuffleQueue")
    +        
    +{-
    +attr {
    +  description: "The type of each component in a value."
    +  has_minimum: true
    +  minimum: 1
    +  name: "component_types"
    +  type: "list(type)"
    +}
    +attr {
    +  default_value { list { } }
    +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
    +  has_minimum: true
    +  name: "shapes"
    +  type: "list(shape)"
    +}
    +attr {
    +  default_value { i: -1 }
    +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
    +  name: "capacity"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. This\nensures a minimum level of mixing of elements."
    +  name: "min_after_dequeue"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, a random seed is used."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
    +  name: "shared_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "The handle to the queue."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Returns the gradient of `Tile`.
    +--
    +-- Since `Tile` takes an input and repeats the input `multiples` times
    +-- along each dimension, `TileGrad` takes in `multiples` and aggregates
    +-- each repeated tile of `input` into `output`.
    +tileGrad :: forall v1 v2 t . (TensorType t) => Tensor v1 t -- ^ __input__
    +            -> Tensor v2 Data.Int.Int32 -- ^ __multiples__
    +            -> Tensor Value t -- ^ __output__
    +tileGrad input multiples | eqLengthGuard [] =
    +    buildOp (opDef "TileGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input multiples
    +{-
    +attr { name: "T" type: "type" }
    +input_arg { name: "input" type_attr: "T" }
    +input_arg { name: "multiples" type: DT_INT32 }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Assign `value` to the sliced l-value reference of `ref`.
    +--
    +-- The values of `value` are assigned to the positions in the variable
    +-- `ref` that are selected by the slice parameters. The slice parameters
+-- `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
    +-- 
    +-- NOTE this op currently does not support broadcasting and so `value`'s
    +-- shape must be exactly the shape produced by the slice of `ref`.
    +stridedSliceAssign :: forall v1 v2 v3 v4 v5 index t . (TensorType index,
    +                                                       OneOf '[Data.Int.Int32,
    +                                                               Data.Int.Int64] index,
    +                                                       TensorType t) =>
    +                      Tensor v1 t -- ^ __ref__
    +                      -> Tensor v2 index -- ^ __begin__
    +                      -> Tensor v3 index -- ^ __end__
    +                      -> Tensor v4 index -- ^ __strides__
    +                      -> Tensor v5 t -- ^ __value__
    +                      -> Tensor Value t -- ^ __output_ref__
    +stridedSliceAssign ref begin end strides value | eqLengthGuard [] =
    +    buildOp (opDef "StridedSliceAssign"
    +             & opAttr "Index" .~ tensorType (undefined :: index)
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        ref begin end strides value
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Index"
    +  type: "type"
    +}
    +attr { default_value { i: 0 } name: "begin_mask" type: "int" }
    +attr { default_value { i: 0 } name: "end_mask" type: "int" }
    +attr { default_value { i: 0 } name: "ellipsis_mask" type: "int" }
    +attr { default_value { i: 0 } name: "new_axis_mask" type: "int" }
    +attr {
    +  default_value { i: 0 } name: "shrink_axis_mask" type: "int"
    +}
    +input_arg { is_ref: true name: "ref" type_attr: "T" }
    +input_arg { name: "begin" type_attr: "Index" }
    +input_arg { name: "end" type_attr: "Index" }
    +input_arg { name: "strides" type_attr: "Index" }
    +input_arg { name: "value" type_attr: "T" }
    +output_arg { is_ref: true name: "output_ref" type_attr: "T" }
    +-}
    +
    +-- | Reshapes a tensor.
    +--
    +-- Given `tensor`, this operation returns a tensor that has the same values
    +-- as `tensor` with shape `shape`.
    +-- 
    +-- If one component of `shape` is the special value -1, the size of that dimension
    +-- is computed so that the total size remains constant.  In particular, a `shape`
    +-- of `[-1]` flattens into 1-D.  At most one component of `shape` can be -1.
    +-- 
    +-- If `shape` is 1-D or higher, then the operation returns a tensor with shape
    +-- `shape` filled with the values of `tensor`. In this case, the number of elements
    +-- implied by `shape` must be the same as the number of elements in `tensor`.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
    +-- # tensor 't' has shape [9]
    +-- reshape(t, [3, 3]) ==> [[1, 2, 3],
    +--                         [4, 5, 6],
    +--                         [7, 8, 9]]
    +-- 
    +-- # tensor 't' is [[[1, 1], [2, 2]],
    +-- #                [[3, 3], [4, 4]]]
    +-- # tensor 't' has shape [2, 2, 2]
    +-- reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
    +--                         [3, 3, 4, 4]]
    +-- 
    +-- # tensor 't' is [[[1, 1, 1],
    +-- #                 [2, 2, 2]],
    +-- #                [[3, 3, 3],
    +-- #                 [4, 4, 4]],
    +-- #                [[5, 5, 5],
    +-- #                 [6, 6, 6]]]
    +-- # tensor 't' has shape [3, 2, 3]
    +-- # pass '[-1]' to flatten 't'
    +-- reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
    +-- 
    +-- # -1 can also be used to infer the shape
    +-- 
    +-- # -1 is inferred to be 9:
    +-- reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
    +--                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
    +-- # -1 is inferred to be 2:
    +-- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
    +--                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
    +-- # -1 is inferred to be 3:
    +-- reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
    +--                               [2, 2, 2],
    +--                               [3, 3, 3]],
    +--                              [[4, 4, 4],
    +--                               [5, 5, 5],
    +--                               [6, 6, 6]]]
    +-- 
    +-- # tensor 't' is [7]
    +-- # shape `[]` reshapes to a scalar
    +-- reshape(t, []) ==> 7
    +-- ```
    +reshape :: forall v1 v2 t tshape . (TensorType t, TensorType tshape,
    +                                    OneOf '[Data.Int.Int32,
    +                                            Data.Int.Int64] tshape) =>
    +           Tensor v1 t -- ^ __tensor__
    +           -> Tensor v2 tshape -- ^ __shape__: Defines the shape of the output tensor.
    +           -> Tensor Value t -- ^ __output__
    +reshape tensor shape | eqLengthGuard [] =
    +    buildOp (opDef "Reshape"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tshape" .~ tensorType (undefined :: tshape))
    +        tensor shape
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tshape"
    +  type: "type"
    +}
    +input_arg { name: "tensor" type_attr: "T" }
    +input_arg {
    +  description: "Defines the shape of the output tensor."
    +  name: "shape"
    +  type_attr: "Tshape"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
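+-- A sketch of the first example above (illustration only; same assumptions
+-- as the earlier sketches):
+--
+-- > -- Reshape a length-9 vector into a 3x3 matrix; the fetched values are
+-- > -- unchanged, only the shape metadata differs.
+-- > reshapeExample :: IO (V.Vector Float)
+-- > reshapeExample = TF.runSession $ do
+-- >     let t      = TF.constant (Shape [9]) [1 .. 9 :: Float]
+-- >         shape' = TF.constant (Shape [2]) [3, 3 :: Int32]
+-- >     TF.run (TF.reshape t shape')
+-- > -- expected (row-major): [1 .. 9]
+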
    +-- | A queue that produces elements in first-in first-out order.
    +
    +fIFOQueue :: Tensor Value Data.ByteString.ByteString -- ^ __handle__: The handle to the queue.
    +fIFOQueue  | eqLengthGuard [] =
    +    buildOp (opDef "FIFOQueue")
    +        
    +{-
    +attr {
    +  description: "The type of each component in a value."
    +  has_minimum: true
    +  minimum: 1
    +  name: "component_types"
    +  type: "list(type)"
    +}
    +attr {
    +  default_value { list { } }
    +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
    +  has_minimum: true
    +  name: "shapes"
    +  type: "list(shape)"
    +}
    +attr {
    +  default_value { i: -1 }
    +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
    +  name: "capacity"
    +  type: "int"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
    +  name: "shared_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "The handle to the queue."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Generates labels for candidate sampling with a learned unigram distribution.
    +--
    +-- See explanations of candidate sampling and the data formats at
    +-- go/candidate-sampling.
    +-- 
    +-- For each batch, this op picks a single set of sampled candidate labels.
    +-- 
    +-- The advantages of sampling candidates per-batch are simplicity and the
    +-- possibility of efficient dense matrix multiplication. The disadvantage is that
    +-- the sampled candidates must be chosen independently of the context and of the
    +-- true labels.
    +learnedUnigramCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch.
    +                                  -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
    +                                  -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
    +                                  -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
    +                                          -- candidates in a batch are unique. This requires some approximation to
    +                                          -- estimate the post-rejection sampling probabilities.
    +                                  -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
    +                                                              -- IDs of the num_true target_classes in the corresponding original label.
    +                                  -> (Tensor Value Data.Int.Int64,
    +                                      Tensor Value Float, Tensor Value Float)
    +                                  -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
    +                                  --
    +                                  -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
    +                                  -- the ID of a sampled candidate.
    +                                  --
    +                                  -- * __true_expected_count__: A batch_size * num_true matrix, representing
    +                                  -- the number of times each candidate is expected to occur in a batch
    +                                  -- of sampled candidates. If unique=true, then this is a probability.
    +                                  --
    +                                  -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
    +                                  -- candidate representing the number of times the candidate is expected
    +                                  -- to occur in a batch of sampled candidates.  If unique=true, then this is a
    +                                  -- probability.
    +learnedUnigramCandidateSampler num_sampled num_true range_max unique
    +                               true_classes | eqLengthGuard [] =
    +    buildOp (opDef "LearnedUnigramCandidateSampler"
    +             & opAttr "num_sampled" .~ num_sampled
    +             & opAttr "num_true" .~ num_true
    +             & opAttr "range_max" .~ range_max
    +             & opAttr "unique" .~ unique)
    +        true_classes
    +{-
    +attr {
    +  description: "Number of true labels per context."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_true"
    +  type: "int"
    +}
    +attr {
    +  description: "Number of candidates to randomly sample per batch."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_sampled"
    +  type: "int"
    +}
    +attr {
    +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
    +  name: "unique"
    +  type: "bool"
    +}
    +attr {
    +  description: "The sampler will sample integers from the interval [0, range_max)."
    +  has_minimum: true
    +  minimum: 1
    +  name: "range_max"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "An second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +input_arg {
    +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
    +  name: "true_classes"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
    +  name: "sampled_candidates"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
    +  name: "true_expected_count"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
    +  name: "sampled_expected_count"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Performs fractional average pooling on the input.
    +--
    +-- Fractional average pooling is similar to Fractional max pooling in the pooling
    +-- region generation step. The only difference is that after pooling regions are
    +-- generated, a mean operation is performed instead of a max operation in each
    +-- pooling region.
    +fractionalAvgPool :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int32,
    +                                                          Data.Int.Int64,
    +                                                          Double, Float] t) =>
    +                     Tensor v1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
    +                     -> (Tensor Value t, Tensor Value Data.Int.Int64,
    +                         Tensor Value Data.Int.Int64)
    +                     -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__)
    +                     --
    +                     -- * __output__: output tensor after fractional avg pooling.
    +                     --
    +                     -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient.
    +                     --
    +                     -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient.
    +fractionalAvgPool value | eqLengthGuard [] =
    +    buildOp (opDef "FractionalAvgPool"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        value
    +{-
    +attr {
    +  description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively."
    +  has_minimum: true
    +  minimum: 4
    +  name: "pooling_ratio"
    +  type: "list(float)"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling] (http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random."
    +  name: "pseudo_random"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling."
    +  name: "overlapping"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalAvgPool node in the computation graph. Mainly used\nin unit test to make FractionalAvgPool deterministic."
    +  name: "deterministic"
    +  type: "bool"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "An second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "output tensor after fractional avg pooling."
    +  name: "output"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "row pooling sequence, needed to calculate gradient."
    +  name: "row_pooling_sequence"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "column pooling sequence, needed to calculate gradient."
    +  name: "col_pooling_sequence"
    +  type: DT_INT64
    +}
    +-}
    +
    +-- | Randomly crop `image`.
    +--
    +-- `size` is a 1-D int64 tensor with 2 elements representing the crop height and
+-- width.  The values must be non-negative.
    +-- 
    +-- This Op picks a random location in `image` and crops a `height` by `width`
    +-- rectangle from that location.  The random location is picked so the cropped
    +-- area will fit inside the original image.
    +randomCrop :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                      Data.Int.Int32,
    +                                                      Data.Int.Int64,
    +                                                      Data.Int.Int8,
    +                                                      Data.Word.Word8, Double,
    +                                                      Float] t) =>
    +              Tensor v1 t -- ^ __image__: 3-D of shape `[height, width, channels]`.
+              -> Tensor v2 Data.Int.Int64 -- ^ __size__: 1-D of length 2 containing: `crop_height`, `crop_width`.
+              -> Tensor Value t -- ^ __output__: 3-D of shape `[crop_height, crop_width, channels]`.
    +randomCrop image size | eqLengthGuard [] =
    +    buildOp (opDef "RandomCrop"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        image size
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "An second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +input_arg {
    +  description: "3-D of shape `[height, width, channels]`."
    +  name: "image"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D of length 2 containing: `crop_height`, `crop_width`.."
    +  name: "size"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "3-D of shape `[crop_height, crop_width, channels].`"
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
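+-- A usage sketch (illustration only; same assumptions as the earlier
+-- sketches): crop a random 2x2 window from a 4x4 single-channel image. The
+-- result varies between runs, since this binding leaves the `seed`/`seed2`
+-- attrs at their defaults.
+--
+-- > crop :: IO (V.Vector Float)
+-- > crop = TF.runSession $ do
+-- >     let image = TF.constant (Shape [4, 4, 1]) [1 .. 16 :: Float]
+-- >         size' = TF.constant (Shape [2]) [2, 2 :: Int64]
+-- >     TF.run (TF.randomCrop image size')
+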
    +-- | Cast x of type SrcT to y of DstT.
    +--
    +-- _HostCast requires its input and produces its output in host memory.
    +_HostCast :: forall v1 dstT srcT . (TensorType dstT, TensorType srcT) =>
    +             Tensor v1 srcT -- ^ __x__
    +             -> Tensor Value dstT -- ^ __y__
    +_HostCast x | eqLengthGuard [] =
    +    buildOp (opDef "_HostCast"
    +             & opAttr "DstT" .~ tensorType (undefined :: dstT)
    +             & opAttr "SrcT" .~ tensorType (undefined :: srcT))
    +        x
    +{-
    +attr { name: "SrcT" type: "type" }
    +attr { name: "DstT" type: "type" }
    +input_arg { name: "x" type_attr: "SrcT" }
    +output_arg { name: "y" type_attr: "DstT" }
    +-}
    +
    +-- | Closes the given queue.
    +--
    +-- This operation signals that no more elements will be enqueued in the
    +-- given queue. Subsequent Enqueue(Many) operations will fail.
    +-- Subsequent Dequeue(Many) operations will continue to succeed if
    +-- sufficient elements remain in the queue. Subsequent Dequeue(Many)
    +-- operations that would block will fail immediately.
    +queueClose :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
    +              -> ControlNode
    +queueClose handle | eqLengthGuard [] =
    +    buildOp (opDef "QueueClose")
    +        handle
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, all pending enqueue requests that are\nblocked on the given queue will be cancelled."
    +  name: "cancel_pending_enqueues"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "The handle to a queue."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Return a slice from 'input'.
    +--
    +-- The output tensor is a tensor with dimensions described by 'size'
    +-- whose values are extracted from 'input' starting at the offsets in
    +-- 'begin'.
    +-- 
    +-- *Requirements*:
    +--   0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
    +slice :: forall v1 v2 v3 index t . (TensorType index, OneOf '[Data.Int.Int32,
    +                                                              Data.Int.Int64] index,
    +                                    TensorType t) => Tensor v1 t -- ^ __input__
    +         -> Tensor v2 index -- ^ __begin__: begin[i] specifies the offset into the 'i'th dimension of
    +                            -- 'input' to slice from.
    +         -> Tensor v3 index -- ^ __size__: size[i] specifies the number of elements of the 'i'th dimension
    +                            -- of 'input' to slice. If size[i] is -1, all remaining elements in dimension
    +                            -- i are included in the slice (i.e. this is equivalent to setting
    +                            -- size[i] = input.dim_size(i) - begin[i]).
    +         -> Tensor Value t -- ^ __output__
    +slice input begin size | eqLengthGuard [] =
    +    buildOp (opDef "Slice"
    +             & opAttr "Index" .~ tensorType (undefined :: index)
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input begin size
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Index"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +input_arg {
    +  description: "begin[i] specifies the offset into the \'i\'th dimension of\n\'input\' to slice from."
    +  name: "begin"
    +  type_attr: "Index"
    +}
    +input_arg {
    +  description: "size[i] specifies the number of elements of the \'i\'th dimension\nof \'input\' to slice. If size[i] is -1, all remaining elements in dimension\ni are included in the slice (i.e. this is equivalent to setting\nsize[i] = input.dim_size(i) - begin[i])."
    +  name: "size"
    +  type_attr: "Index"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
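+-- A usage sketch (illustration only; same assumptions as the earlier
+-- sketches): extract the middle column of a 3x3 matrix by starting at offset
+-- [0, 1] and taking size [3, 1]. `size'` is primed to avoid clashing with
+-- this module's `size` wrapper.
+--
+-- > middleColumn :: IO (V.Vector Float)
+-- > middleColumn = TF.runSession $ do
+-- >     let t     = TF.constant (Shape [3, 3]) [1 .. 9 :: Float]
+-- >         begin = TF.constant (Shape [2]) [0, 1 :: Int32]
+-- >         size' = TF.constant (Shape [2]) [3, 1 :: Int32]
+-- >     TF.run (TF.slice t begin size')
+-- > -- expected: [2, 5, 8]
+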
    +-- | Returns the gradient of `StridedSlice`.
    +--
    +-- Since `StridedSlice` cuts out pieces of its `input` which is size
    +-- `shape`, its gradient will have the same shape (which is passed here
    +-- as `shape`). The gradient will be zero in any element that the slice
    +-- does not select.
    +-- 
+-- Arguments are the same as `StridedSlice` with the exception that
    +-- `dy` is the input gradient to be propagated and `shape` is the
    +-- shape of `StridedSlice`'s `input`.
    +stridedSliceGrad :: forall v1 v2 v3 v4 v5 index t . (TensorType index,
    +                                                     OneOf '[Data.Int.Int32,
    +                                                             Data.Int.Int64] index,
    +                                                     TensorType t) =>
    +                    Tensor v1 index -- ^ __shape__
    +                    -> Tensor v2 index -- ^ __begin__
    +                    -> Tensor v3 index -- ^ __end__
    +                    -> Tensor v4 index -- ^ __strides__
    +                    -> Tensor v5 t -- ^ __dy__
    +                    -> Tensor Value t -- ^ __output__
    +stridedSliceGrad shape begin end strides dy | eqLengthGuard [] =
    +    buildOp (opDef "StridedSliceGrad"
    +             & opAttr "Index" .~ tensorType (undefined :: index)
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        shape begin end strides dy
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Index"
    +  type: "type"
    +}
    +attr { default_value { i: 0 } name: "begin_mask" type: "int" }
    +attr { default_value { i: 0 } name: "end_mask" type: "int" }
    +attr { default_value { i: 0 } name: "ellipsis_mask" type: "int" }
    +attr { default_value { i: 0 } name: "new_axis_mask" type: "int" }
    +attr {
    +  default_value { i: 0 } name: "shrink_axis_mask" type: "int"
    +}
    +input_arg { name: "shape" type_attr: "Index" }
    +input_arg { name: "begin" type_attr: "Index" }
    +input_arg { name: "end" type_attr: "Index" }
    +input_arg { name: "strides" type_attr: "Index" }
    +input_arg { name: "dy" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
    +--
    +-- This Op does not require `a_indices` be sorted in standard lexicographic order.
    +sparseTensorDenseAdd :: forall v1 v2 v3 v4 t tindices . (TensorType t,
    +                                                         OneOf '[(Data.Complex.Complex Double),
    +                                                                 (Data.Complex.Complex Float),
    +                                                                 Data.Int.Int16,
    +                                                                 Data.Int.Int32,
    +                                                                 Data.Int.Int64,
    +                                                                 Data.Int.Int8,
    +                                                                 Data.Word.Word16,
    +                                                                 Data.Word.Word8,
    +                                                                 Double,
    +                                                                 Float] t,
    +                                                         TensorType tindices,
    +                                                         OneOf '[Data.Int.Int32,
    +                                                                 Data.Int.Int64] tindices) =>
    +                        Tensor v1 tindices -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
    +                        -> Tensor v2 t -- ^ __a_values__: 1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`.
    +                        -> Tensor v3 tindices -- ^ __a_shape__: 1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`.
    +                        -> Tensor v4 t -- ^ __b__: `ndims`-D Tensor.  With shape `a_shape`.
    +                        -> Tensor Value t -- ^ __output__
    +sparseTensorDenseAdd a_indices a_values a_shape b | eqLengthGuard [] =
    +    buildOp (opDef "SparseTensorDenseAdd"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        a_indices a_values a_shape b
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`."
    +  name: "a_indices"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`."
    +  name: "a_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`."
    +  name: "a_shape"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "`ndims`-D Tensor.  With shape `a_shape`."
    +  name: "b"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
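+-- A usage sketch (illustration only; same assumptions as the earlier
+-- sketches): add a 2x2 `SparseTensor` with a single nonzero entry at (0, 1)
+-- to a dense 2x2 matrix of ones.
+--
+-- > sparseAddExample :: IO (V.Vector Float)
+-- > sparseAddExample = TF.runSession $ do
+-- >     let aIndices = TF.constant (Shape [1, 2]) [0, 1 :: Int32]
+-- >         aValues  = TF.constant (Shape [1]) [5 :: Float]
+-- >         aShape   = TF.constant (Shape [2]) [2, 2 :: Int32]
+-- >         b        = TF.constant (Shape [2, 2]) [1, 1, 1, 1 :: Float]
+-- >     TF.run (TF.sparseTensorDenseAdd aIndices aValues aShape b)
+-- > -- expected (row-major): [1, 6, 1, 1]
+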
    +-- | Returns the size of a tensor.
    +--
    +-- This operation returns an integer representing the number of elements in
    +-- `input`.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
+-- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    +-- size(t) ==> 12
    +-- ```
    +size :: forall v1 t out_type . (TensorType t, TensorType out_type,
    +                                OneOf '[Data.Int.Int32,
    +                                        Data.Int.Int64] out_type) =>
    +        Tensor v1 t -- ^ __input__
    +        -> Tensor Value out_type -- ^ __output__
    +size input | eqLengthGuard [] =
    +    buildOp (opDef "Size"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "out_type" .~ tensorType (undefined :: out_type))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "out_type"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "out_type" }
    +-}
    +
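+-- A usage sketch (illustration only; same assumptions as the earlier
+-- sketches, and the scalar result is assumed to fetch as a one-element
+-- vector):
+--
+-- > numElements :: IO (V.Vector Int32)
+-- > numElements = TF.runSession $
+-- >     TF.run (TF.size (TF.constant (Shape [2, 2, 3]) [1 .. 12 :: Float]))
+-- > -- expected: [12]
+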
    +-- | Defines a barrier that persists across different graph executions.
    +--
    +-- A barrier represents a key-value map, where each key is a string, and
    +-- each value is a tuple of tensors.
    +-- 
    +-- At runtime, the barrier contains 'complete' and 'incomplete'
    +-- elements. A complete element has defined tensors for all components of
    +-- its value tuple, and may be accessed using BarrierTakeMany. An
    +-- incomplete element has some undefined components in its value tuple,
    +-- and may be updated using BarrierInsertMany.
    +barrier :: Tensor Value Data.ByteString.ByteString -- ^ __handle__: The handle to the barrier.
    +barrier  | eqLengthGuard [] =
    +    buildOp (opDef "Barrier")
    +        
    +{-
    +attr {
    +  description: "The type of each component in a value."
    +  has_minimum: true
    +  minimum: 1
    +  name: "component_types"
    +  type: "list(type)"
    +}
    +attr {
    +  default_value { list { } }
    +  description: "The shape of each component in a value. Each shape must be 1 in the\nfirst dimension. The length of this attr must be the same as the length of\ncomponent_types."
    +  has_minimum: true
    +  name: "shapes"
    +  type: "list(shape)"
    +}
    +attr {
    +  default_value { i: -1 }
    +  description: "The capacity of the barrier.  The default capacity is MAX_INT32,\nwhich is the largest capacity of the underlying queue."
    +  name: "capacity"
    +  type: "int"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this barrier is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this barrier will be shared under the given name\nacross multiple sessions."
    +  name: "shared_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "The handle to the barrier."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Computes the log of the absolute value of `Gamma(x)` element-wise.
    +
    +lgamma :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                               Float] t) =>
    +          Tensor v1 t -- ^ __x__
    +          -> Tensor Value t -- ^ __y__
    +lgamma x | eqLengthGuard [] =
    +    buildOp (opDef "Lgamma"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
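+-- A usage sketch (illustration only; same assumptions as the earlier
+-- sketches): at positive integers, `Gamma(n) = (n-1)!`, so `lgamma` should
+-- return `log((n-1)!)`.
+--
+-- > lgammaExample :: IO (V.Vector Float)
+-- > lgammaExample = TF.runSession $
+-- >     TF.run (TF.lgamma (TF.constant (Shape [3]) [1, 2, 5 :: Float]))
+-- > -- expected: approximately [0.0, 0.0, 3.178] (log 24 ~ 3.178)
+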
    +-- | Decode a JPEG-encoded image to a uint8 tensor.
    +--
    +-- The attr `channels` indicates the desired number of color channels for the
    +-- decoded image.
    +-- 
    +-- Accepted values are:
    +-- 
    +-- *   0: Use the number of channels in the JPEG-encoded image.
    +-- *   1: output a grayscale image.
    +-- *   3: output an RGB image.
    +-- 
    +-- If needed, the JPEG-encoded image is transformed to match the requested number
    +-- of color channels.
    +-- 
    +-- The attr `ratio` allows downscaling the image by an integer factor during
    +-- decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
    +-- downscaling the image later.
    +decodeJpeg :: Tensor v1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The JPEG-encoded image.
+              -> Tensor Value Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`.
    +decodeJpeg contents | eqLengthGuard [] =
    +    buildOp (opDef "DecodeJpeg")
    +        contents
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "Number of color channels for the decoded image."
    +  name: "channels"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 1 }
    +  description: "Downscaling ratio."
    +  name: "ratio"
    +  type: "int"
    +}
    +attr {
    +  default_value { b: true }
    +  description: "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only)."
    +  name: "fancy_upscaling"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true try to recover an image from truncated input."
    +  name: "try_recover_truncated"
    +  type: "bool"
    +}
    +attr {
    +  default_value { f: 1.0 }
    +  description: "The minimum required fraction of lines before a truncated\ninput is accepted."
    +  name: "acceptable_fraction"
    +  type: "float"
    +}
    +input_arg {
    +  description: "0-D.  The JPEG-encoded image."
    +  name: "contents"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "3-D with shape `[height, width, channels]`.."
    +  name: "image"
    +  type: DT_UINT8
    +}
    +-}
    +
    +-- | Returns shape of tensors.
    +--
+-- This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
    +shapeN :: forall v1 t out_type . (TensorType t, TensorType out_type,
    +                                  OneOf '[Data.Int.Int32,
    +                                          Data.Int.Int64] out_type) =>
    +          [Tensor v1 t] -- ^ __input__
    +          -> [Tensor Value out_type] -- ^ __output__
    +shapeN input | eqLengthGuard [("N", [("input", length input)])] =
    +    buildOp (opDef "ShapeN"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "out_type" .~ tensorType (undefined :: out_type)
    +             & opAttr "N" .~ (fromIntegral (length input) :: Int64))
    +        input
    +{-
    +attr { has_minimum: true minimum: 1 name: "N" type: "int" }
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "out_type"
    +  type: "type"
    +}
    +input_arg { name: "input" number_attr: "N" type_attr: "T" }
    +output_arg {
    +  name: "output" number_attr: "N" type_attr: "out_type"
    +}
    +-}
    +
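+-- A usage sketch (illustration only; same assumptions as the earlier
+-- sketches, and fetching a list of tensors as a list of vectors is also
+-- assumed to be supported by `run`): fetch the shapes of two tensors with
+-- one op.
+--
+-- > shapesExample :: IO [V.Vector Int32]
+-- > shapesExample = TF.runSession $ do
+-- >     let a = TF.constant (Shape [2, 3]) [1 .. 6 :: Float]
+-- >         b = TF.constant (Shape [4]) [1 .. 4 :: Float]
+-- >     TF.run (TF.shapeN [a, b])
+-- > -- expected: [[2, 3], [4]]
+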
    +-- | Generates labels for candidate sampling with a uniform distribution.
    +--
    +-- See explanations of candidate sampling and the data formats at
    +-- go/candidate-sampling.
    +-- 
    +-- For each batch, this op picks a single set of sampled candidate labels.
    +-- 
    +-- The advantages of sampling candidates per-batch are simplicity and the
    +-- possibility of efficient dense matrix multiplication. The disadvantage is that
    +-- the sampled candidates must be chosen independently of the context and of the
    +-- true labels.
    +uniformCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch.
    +                           -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
    +                           -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
    +                           -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
    +                                   -- candidates in a batch are unique. This requires some approximation to
    +                                   -- estimate the post-rejection sampling probabilities.
    +                           -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
    +                                                       -- IDs of the num_true target_classes in the corresponding original label.
    +                           -> (Tensor Value Data.Int.Int64, Tensor Value Float,
    +                               Tensor Value Float)
    +                           -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
    +                           --
    +                           -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
    +                           -- the ID of a sampled candidate.
    +                           --
    +                           -- * __true_expected_count__: A batch_size * num_true matrix, representing
    +                           -- the number of times each candidate is expected to occur in a batch
    +                           -- of sampled candidates. If unique=true, then this is a probability.
    +                           --
    +                           -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
    +                           -- candidate representing the number of times the candidate is expected
    +                           -- to occur in a batch of sampled candidates.  If unique=true, then this is a
    +                           -- probability.
    +uniformCandidateSampler num_sampled num_true range_max unique
    +                        true_classes | eqLengthGuard [] =
    +    buildOp (opDef "UniformCandidateSampler"
    +             & opAttr "num_sampled" .~ num_sampled
    +             & opAttr "num_true" .~ num_true
    +             & opAttr "range_max" .~ range_max
    +             & opAttr "unique" .~ unique)
    +        true_classes
    +{-
    +attr {
    +  description: "Number of true labels per context."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_true"
    +  type: "int"
    +}
    +attr {
    +  description: "Number of candidates to randomly sample per batch."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_sampled"
    +  type: "int"
    +}
    +attr {
    +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
    +  name: "unique"
    +  type: "bool"
    +}
    +attr {
    +  description: "The sampler will sample integers from the interval [0, range_max)."
    +  has_minimum: true
    +  minimum: 1
    +  name: "range_max"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "An second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +input_arg {
    +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
    +  name: "true_classes"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
    +  name: "sampled_candidates"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
    +  name: "true_expected_count"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
    +  name: "sampled_expected_count"
    +  type: DT_FLOAT
    +}
    +-}
    +
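    +-- A minimal usage sketch, assuming a batch_size * num_true matrix
    +-- @trueClasses :: Tensor Value Data.Int.Int64@ built elsewhere; this
    +-- draws 64 unique candidate IDs per batch from [0, 1000) with one true
    +-- label per context:
    +--
    +-- > (sampled, trueExpected, sampledExpected) =
    +-- >     uniformCandidateSampler 64 1 1000 True trueClasses
    +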
    +-- | Finds unique elements in a 1-D tensor.
    +--
    +-- This operation returns a tensor `y` containing all of the unique elements of `x`
    +-- sorted in the same order that they occur in `x`. This operation also returns a
    +-- tensor `idx` the same size as `x` that contains the index of each value of `x`
    +-- in the unique output `y`. In other words:
    +-- 
    +-- `y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]`
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
    +-- y, idx = unique(x)
    +-- y ==> [1, 2, 4, 7, 8]
    +-- idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
    +-- ```
    +unique :: forall v1 t out_idx . (TensorType t, TensorType out_idx,
    +                                 OneOf '[Data.Int.Int32,
    +                                         Data.Int.Int64] out_idx) =>
    +          Tensor v1 t -- ^ __x__: 1-D.
    +          -> (Tensor Value t, Tensor Value out_idx)
    +          -- ^ (__y__, __idx__)
    +          --
    +          -- * __y__: 1-D.
    +          --
    +          -- * __idx__: 1-D.
    +unique x | eqLengthGuard [] =
    +    buildOp (opDef "Unique"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "out_idx" .~ tensorType (undefined :: out_idx))
    +        x
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "out_idx"
    +  type: "type"
    +}
    +input_arg { description: "1-D." name: "x" type_attr: "T" }
    +output_arg { description: "1-D." name: "y" type_attr: "T" }
    +output_arg { description: "1-D." name: "idx" type_attr: "out_idx" }
    +-}
    +
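    +-- A minimal usage sketch, assuming a 1-D @x :: Tensor Value Float@ built
    +-- elsewhere; the annotation picks the index type:
    +--
    +-- > (y, idx) = unique x :: (Tensor Value Float, Tensor Value Data.Int.Int32)
    +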
    +-- | Draw bounding boxes on a batch of images.
    +--
    +-- Outputs a copy of `images` but draws on top of the pixels zero or more bounding
    +-- boxes specified by the locations in `boxes`. The coordinates of each
    +-- bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
    +-- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
    +-- height of the underlying image.
    +-- 
    +-- For example, if an image is 100 x 200 pixels and the bounding box is
    +-- `[0.1, 0.2, 0.5, 0.9]`, the upper-left and lower-right coordinates of the
    +-- bounding box will be `(10, 40)` to `(50, 180)`.
    +-- 
    +-- Parts of the bounding box may fall outside the image.
    +drawBoundingBoxes :: forall v1 v2 t . (TensorType t, OneOf '[Data.Word.Word16,
    +                                                             Float] t) =>
    +                     Tensor v1 t -- ^ __images__: 4-D with shape `[batch, height, width, depth]`. A batch of images.
    +                     -> Tensor v2 Float -- ^ __boxes__: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
    +                                        -- boxes.
    +                     -> Tensor Value t -- ^ __output__: 4-D with the same shape as `images`. The batch of input images with
    +                     -- bounding boxes drawn on the images.
    +drawBoundingBoxes images boxes | eqLengthGuard [] =
    +    buildOp (opDef "DrawBoundingBoxes"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        images boxes
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, depth]`. A batch of images."
    +  name: "images"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding\nboxes."
    +  name: "boxes"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "4-D with the same shape as `images`. The batch of input images with\nbounding boxes drawn on the images."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Split the data from the input value into TensorArray elements.
    +--
    +-- Assuming that `lengths` takes on values
    +-- 
    +--   ```(n0, n1, ..., n(T-1))```
    +-- 
    +-- and that `value` has shape
    +-- 
    +--   ```((n0 + n1 + ... + n(T-1)) x d0 x d1 x ...)```,
    +-- 
    +-- this splits values into a TensorArray with T tensors.
    +-- 
    +-- TensorArray index t will be the subtensor of values with starting position
    +-- 
    +--   ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
    +-- 
    +-- and having size
    +-- 
    +--   ```nt x d0 x d1 x ...```
    +tensorArraySplit :: forall v1 v2 v3 v4 t . (TensorType t) =>
    +                    Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
    +                    -> Tensor v2 t -- ^ __value__: The concatenated tensor to write to the TensorArray.
    +                    -> Tensor v3 Data.Int.Int64 -- ^ __lengths__: The vector of lengths, how to split the rows of value into the
    +                                                -- TensorArray.
    +                    -> Tensor v4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
    +                    -> Tensor Value Float -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
    +tensorArraySplit handle value lengths flow_in | eqLengthGuard [] =
    +    buildOp (opDef "TensorArraySplit"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        handle value lengths flow_in
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The handle to a TensorArray."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "The concatenated tensor to write to the TensorArray."
    +  name: "value"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The vector of lengths, how to split the rows of value into the\nTensorArray."
    +  name: "lengths"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_in"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_out"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Splits a tensor into `num_split` tensors along one dimension.
    +
    +split :: forall v1 v2 t . (TensorType t) =>
    +         Data.Int.Int64 -- ^ __num_split__: The number of ways to split.  Must evenly divide
    +                        -- `value.shape[split_dim]`.
    +         -> Tensor v1 Data.Int.Int32 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
    +                                     -- `[0, rank(value))`.
    +         -> Tensor v2 t -- ^ __value__: The tensor to split.
    +         -> [Tensor Value t] -- ^ __output__: They are identically shaped tensors, whose shape matches that of `value`
    +         -- except along `split_dim`, where their sizes are
    +         -- `values.shape[split_dim] / num_split`.
    +split num_split split_dim value | eqLengthGuard [] =
    +    buildListOp [num_split] (opDef "Split"
    +                             & opAttr "T" .~ tensorType (undefined :: t)
    +                             & opAttr "num_split" .~ num_split)
    +        split_dim value
    +{-
    +attr {
    +  description: "The number of ways to split.  Must evenly divide\n`value.shape[split_dim]`."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_split"
    +  type: "int"
    +}
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "0-D.  The dimension along which to split.  Must be in the range\n`[0, rank(value))`."
    +  name: "split_dim"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "The tensor to split." name: "value" type_attr: "T"
    +}
    +output_arg {
    +  description: "They are identically shaped tensors, whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`values.shape[split_dim] / num_split`."
    +  name: "output"
    +  number_attr: "num_split"
    +  type_attr: "T"
    +}
    +-}
    +
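    +-- A minimal usage sketch, assuming a 0-D dimension-index tensor
    +-- @splitDim :: Tensor Value Data.Int.Int32@ and a @value :: Tensor Value Float@
    +-- built elsewhere; this yields four equally sized slices:
    +--
    +-- > parts :: [Tensor Value Float]
    +-- > parts = split 4 splitDim value
    +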
    +-- | Computes the maximum along segments of a tensor.
    +--
    +-- Read [the section on Segmentation](../../api_docs/python/math_ops.md#segmentation)
    +-- for an explanation of segments.
    +-- 
    +-- Computes a tensor such that
    +-- \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
    +-- that `segment_ids[j] == i`.
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/SegmentMax.png" alt>
    +-- </div>
    +segmentMax :: forall v1 v2 t tindices . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                               Data.Int.Int32,
    +                                                               Data.Int.Int64,
    +                                                               Data.Int.Int8,
    +                                                               Data.Word.Word16,
    +                                                               Data.Word.Word8,
    +                                                               Double, Float] t,
    +                                         TensorType tindices,
    +                                         OneOf '[Data.Int.Int32,
    +                                                 Data.Int.Int64] tindices) =>
    +              Tensor v1 t -- ^ __data__
    +              -> Tensor v2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
    +                                    -- first dimension.  Values should be sorted and can be repeated.
    +              -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
    +              -- has size `k`, the number of segments.
    +segmentMax data' segment_ids | eqLengthGuard [] =
    +    buildOp (opDef "SegmentMax"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        data' segment_ids
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +input_arg { name: "data" type_attr: "T" }
    +input_arg {
    +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
    +  name: "segment_ids"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Raise an exception to abort the process when called.
    +--
    +-- Returns nothing but an exception.
    +abort :: ControlNode
    +abort  | eqLengthGuard [] =
    +    buildOp (opDef "Abort")
    +        
    +{-
    +attr {
    +  default_value { s: "" }
    +  description: "A string which is the message associated with the exception."
    +  name: "error_msg"
    +  type: "string"
    +}
    +-}
    +
    +-- | Reorders a SparseTensor into the canonical, row-major ordering.
    +--
    +-- Note that by convention, all sparse ops preserve the canonical ordering along
    +-- increasing dimension number. The only time ordering can be violated is during
    +-- manual manipulation of the indices and values vectors to add entries.
    +-- 
    +-- Reordering does not affect the shape of the SparseTensor.
    +-- 
    +-- If the tensor has rank `R` and `N` non-empty values, `input_indices` has
    +-- shape `[N, R]`, `input_values` has length `N`, and `input_shape` has length `R`.
    +sparseReorder :: forall v1 v2 v3 t . (TensorType t) =>
    +                 Tensor v1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
    +                                          -- SparseTensor, possibly not in canonical ordering.
    +                 -> Tensor v2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
    +                 -> Tensor v3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
    +                 -> (Tensor Value Data.Int.Int64, Tensor Value t)
    +                 -- ^ (__output_indices__, __output_values__)
    +                 --
    +                 -- * __output_indices__: 2-D.  `N x R` matrix with the same indices as input_indices, but
    +                 -- in canonical row-major ordering.
    +                 --
    +                 -- * __output_values__: 1-D.  `N` non-empty values corresponding to `output_indices`.
    +sparseReorder input_indices input_values input_shape | eqLengthGuard [] =
    +    buildOp (opDef "SparseReorder"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input_indices input_values input_shape
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
    +  name: "input_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
    +  name: "input_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the input SparseTensor."
    +  name: "input_shape"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "2-D.  `N x R` matrix with the same indices as input_indices, but\nin canonical row-major ordering."
    +  name: "output_indices"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "1-D.  `N` non-empty values corresponding to `output_indices`."
    +  name: "output_values"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the gradient for the rsqrt of `x` wrt its input.
    +--
    +-- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
    +-- is the corresponding input gradient.
    +rsqrtGrad :: forall v1 v2 t . (TensorType t,
    +                               OneOf '[(Data.Complex.Complex Double),
    +                                       (Data.Complex.Complex Float),
    +                                       Data.Word.Word16, Double, Float] t) =>
    +             Tensor v1 t -- ^ __x__
    +             -> Tensor v2 t -- ^ __y__
    +             -> Tensor Value t -- ^ __z__
    +rsqrtGrad x y | eqLengthGuard [] =
    +    buildOp (opDef "RsqrtGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Reverses variable length slices.
    +--
    +-- This op first slices `input` along the dimension `batch_dim`, and for each
    +-- slice `i`, reverses the first `seq_lengths[i]` elements along
    +-- the dimension `seq_dim`.
    +-- 
    +-- The elements of `seq_lengths` must obey `seq_lengths[i] < input.dims[seq_dim]`,
    +-- and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
    +-- 
    +-- The output slice `i` along dimension `batch_dim` is then given by input
    +-- slice `i`, with the first `seq_lengths[i]` slices along dimension
    +-- `seq_dim` reversed.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # Given this:
    +-- batch_dim = 0
    +-- seq_dim = 1
    +-- input.dims = (4, 8, ...)
    +-- seq_lengths = [7, 2, 3, 5]
    +-- 
    +-- # then slices of input are reversed on seq_dim, but only up to seq_lengths:
    +-- output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
    +-- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
    +-- output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
    +-- output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
    +-- 
    +-- # while entries past seq_lens are copied through:
    +-- output[0, 7:, :, ...] = input[0, 7:, :, ...]
    +-- output[1, 2:, :, ...] = input[1, 2:, :, ...]
    +-- output[2, 3:, :, ...] = input[2, 3:, :, ...]
    +-- output[3, 5:, :, ...] = input[3, 5:, :, ...]
    +-- ```
    +-- 
    +-- In contrast, if:
    +-- 
    +-- ```prettyprint
    +-- # Given this:
    +-- batch_dim = 2
    +-- seq_dim = 0
    +-- input.dims = (8, ?, 4, ...)
    +-- seq_lengths = [7, 2, 3, 5]
    +-- 
    +-- # then slices of input are reversed on seq_dim, but only up to seq_lengths:
    +-- output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
    +-- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
    +-- output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
    +-- output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
    +-- 
    +-- # while entries past seq_lens are copied through:
    +-- output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
    +-- output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
    +-- output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
    +-- output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
    +-- ```
    +reverseSequence :: forall v1 v2 t tlen . (TensorType t, TensorType tlen,
    +                                          OneOf '[Data.Int.Int32,
    +                                                  Data.Int.Int64] tlen) =>
    +                   Data.Int.Int64 -- ^ __seq_dim__: The dimension which is partially reversed.
    +                   -> Tensor v1 t -- ^ __input__: The input to reverse.
    +                   -> Tensor v2 tlen -- ^ __seq_lengths__: 1-D with length `input.dims(batch_dim)` and
    +                                     -- `max(seq_lengths) < input.dims(seq_dim)`
    +                   -> Tensor Value t -- ^ __output__: The partially reversed input. It has the same shape as `input`.
    +reverseSequence seq_dim input seq_lengths | eqLengthGuard [] =
    +    buildOp (opDef "ReverseSequence"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tlen" .~ tensorType (undefined :: tlen)
    +             & opAttr "seq_dim" .~ seq_dim)
    +        input seq_lengths
    +{-
    +attr {
    +  description: "The dimension which is partially reversed."
    +  name: "seq_dim"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "The dimension along which reversal is performed."
    +  name: "batch_dim"
    +  type: "int"
    +}
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT64 }
    +  name: "Tlen"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The input to reverse." name: "input" type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D with length `input.dims(batch_dim)` and\n`max(seq_lengths) < input.dims(seq_dim)`"
    +  name: "seq_lengths"
    +  type_attr: "Tlen"
    +}
    +output_arg {
    +  description: "The partially reversed input. It has the same shape as `input`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Returns the number of records this Reader has produced.
    +--
    +-- This is the same as the number of ReaderRead executions that have
    +-- succeeded.
    +readerNumRecordsProduced :: Tensor v1 Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
    +                            -> Tensor Value Data.Int.Int64 -- ^ __records_produced__
    +readerNumRecordsProduced reader_handle | eqLengthGuard [] =
    +    buildOp (opDef "ReaderNumRecordsProduced")
    +        reader_handle
    +{-
    +input_arg {
    +  description: "Handle to a Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +output_arg { name: "records_produced" type: DT_INT64 }
    +-}
    +
    +-- | Deserialize and concatenate `SparseTensors` from a serialized minibatch.
    +--
    +-- The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
    +-- `N` is the minibatch size and the rows correspond to packed outputs of
    +-- `SerializeSparse`.  The ranks of the original `SparseTensor` objects
    +-- must all match.  When the final `SparseTensor` is created, it has rank one
    +-- higher than the ranks of the incoming `SparseTensor` objects
    +-- (they have been concatenated along a new row dimension).
    +-- 
    +-- The output `SparseTensor` object's shape values for all dimensions but the
    +-- first are the max across the input `SparseTensor` objects' shape values
    +-- for the corresponding dimensions.  Its first shape value is `N`, the minibatch
    +-- size.
    +-- 
    +-- The input `SparseTensor` objects' indices are assumed ordered in
    +-- standard lexicographic order.  If this is not the case, after this
    +-- step run `SparseReorder` to restore index ordering.
    +-- 
    +-- For example, if the serialized input is a `[2 x 3]` matrix representing two
    +-- original `SparseTensor` objects:
    +-- 
    +--     index = [ 0]
    +--             [10]
    +--             [20]
    +--     values = [1, 2, 3]
    +--     shape = [50]
    +-- 
    +-- and
    +-- 
    +--     index = [ 2]
    +--             [10]
    +--     values = [4, 5]
    +--     shape = [30]
    +-- 
    +-- then the final deserialized `SparseTensor` will be:
    +-- 
    +--     index = [0  0]
    +--             [0 10]
    +--             [0 20]
    +--             [1  2]
    +--             [1 10]
    +--     values = [1, 2, 3, 4, 5]
    +--     shape = [2 50]
    +deserializeManySparse :: forall v1 dtype . (TensorType dtype) =>
    +                         Tensor v1 Data.ByteString.ByteString -- ^ __serialized_sparse__: 2-D, The `N` serialized `SparseTensor` objects.
    +                                                              -- Must have 3 columns.
    +                         -> (Tensor Value Data.Int.Int64, Tensor Value dtype,
    +                             Tensor Value Data.Int.Int64)
    +                         -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__)
    +                         --
    +                         -- * __sparse_indices__
    +                         --
    +                         -- * __sparse_values__
    +                         --
    +                         -- * __sparse_shape__
    +deserializeManySparse serialized_sparse | eqLengthGuard [] =
    +    buildOp (opDef "DeserializeManySparse"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        serialized_sparse
    +{-
    +attr {
    +  description: "The `dtype` of the serialized `SparseTensor` objects."
    +  name: "dtype"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D, The `N` serialized `SparseTensor` objects.\nMust have 3 columns."
    +  name: "serialized_sparse"
    +  type: DT_STRING
    +}
    +output_arg { name: "sparse_indices" type: DT_INT64 }
    +output_arg { name: "sparse_values" type_attr: "dtype" }
    +output_arg { name: "sparse_shape" type: DT_INT64 }
    +-}
    +
    +-- | Returns immutable tensor from memory region.
    +--
    +-- The current implementation memmaps the tensor from a file.
    +immutableConst :: forall dtype . (TensorType dtype) =>
    +                  Tensor Value dtype -- ^ __tensor__
    +immutableConst  | eqLengthGuard [] =
    +    buildOp (opDef "ImmutableConst"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        
    +{-
    +attr {
    +  description: "Type of the returned tensor."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  description: "Shape of the returned tensor."
    +  name: "shape"
    +  type: "shape"
    +}
    +attr {
    +  description: "Name of readonly memory region used by the tensor, see\nNewReadOnlyMemoryRegionFromFile in tensorflow::Env."
    +  name: "memory_region_name"
    +  type: "string"
    +}
    +output_arg { name: "tensor" type_attr: "dtype" }
    +-}
    +
    +-- | Returns the min of x and y (i.e. x < y ? x : y) element-wise.
    +--
    +-- *NOTE*: `Minimum` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +minimum :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32,
    +                                                   Data.Int.Int64,
    +                                                   Data.Word.Word16, Double,
    +                                                   Float] t) =>
    +           Tensor v1 t -- ^ __x__
    +           -> Tensor v2 t -- ^ __y__
    +           -> Tensor Value t -- ^ __z__
    +minimum x y | eqLengthGuard [] =
    +    buildOp (opDef "Minimum"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
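    +-- A minimal usage sketch, assuming @x@ and @y :: Tensor Value Float@ are
    +-- built elsewhere (this @minimum@ shadows the Prelude's, so the generated
    +-- module is typically imported qualified):
    +--
    +-- > z :: Tensor Value Float
    +-- > z = minimum x y
    +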
    +-- | Initializes a table from a text file.
    +--
    +-- It inserts one key-value pair into the table for each line of the file.
    +-- The key and value are each extracted from the whole line content, from an
    +-- element of the line split on `delimiter`, or from the line number (starting
    +-- from zero). Where to extract the key and value from a line is specified by
    +-- `key_index` and `value_index`.
    +-- 
    +-- - A value of -1 means use the line number (starting from zero); expects `int64`.
    +-- - A value of -2 means use the whole line content; expects `string`.
    +-- - A value >= 0 means use the index (starting at zero) of the split line based
    +--   on `delimiter`.
    +initializeTableFromTextFile :: Data.Int.Int64 -- ^ __key_index__: Column index in a line to get the table `key` values from.
    +                               -> Data.Int.Int64 -- ^ __value_index__: Column index that represents information of a line to get the table
    +                                                 -- `value` values from.
    +                               -> Tensor v1 Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized.
    +                               -> Tensor v2 Data.ByteString.ByteString -- ^ __filename__: Filename of a vocabulary text file.
    +                               -> ControlNode
    +initializeTableFromTextFile key_index value_index table_handle
    +                            filename | eqLengthGuard [] =
    +    buildOp (opDef "InitializeTableFromTextFile"
    +             & opAttr "key_index" .~ key_index
    +             & opAttr "value_index" .~ value_index)
    +        table_handle filename
    +{-
    +attr {
    +  description: "Column index in a line to get the table `key` values from."
    +  has_minimum: true
    +  minimum: -2
    +  name: "key_index"
    +  type: "int"
    +}
    +attr {
    +  description: "Column index that represents information of a line to get the table\n`value` values from."
    +  has_minimum: true
    +  minimum: -2
    +  name: "value_index"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: -1 }
    +  description: "Number of elements of the file, use -1 if unknown."
    +  has_minimum: true
    +  minimum: -1
    +  name: "vocab_size"
    +  type: "int"
    +}
    +attr {
    +  default_value { s: "\t" }
    +  description: "Delimiter to separate fields in a line."
    +  name: "delimiter"
    +  type: "string"
    +}
    +input_arg {
    +  description: "Handle to a table which will be initialized."
    +  is_ref: true
    +  name: "table_handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Filename of a vocabulary text file."
    +  name: "filename"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Returns the diagonal part of the tensor.
    +--
    +-- This operation returns a tensor with the `diagonal` part
    +-- of the `input`. The `diagonal` part is computed as follows:
    +-- 
    +-- Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
    +-- tensor of rank `k` with dimensions `[D1,..., Dk]` where:
    +-- 
    +-- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 'input' is [[1, 0, 0, 0]
    +--               [0, 2, 0, 0]
    +--               [0, 0, 3, 0]
    +--               [0, 0, 0, 4]]
    +-- 
    +-- tf.diag_part(input) ==> [1, 2, 3, 4]
    +-- ```
    +diagPart :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                                 (Data.Complex.Complex Float),
    +                                                 Data.Int.Int32, Data.Int.Int64,
    +                                                 Double, Float] t) =>
    +            Tensor v1 t -- ^ __input__: Rank k tensor where k is 2, 4, or 6.
    +            -> Tensor Value t -- ^ __diagonal__: The extracted diagonal.
    +diagPart input | eqLengthGuard [] =
    +    buildOp (opDef "DiagPart"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Rank k tensor where k is 2, 4, or 6."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The extracted diagonal."
    +  name: "diagonal"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes natural logarithm of x element-wise.
    +--
    +-- I.e., \\(y = \log_e x\\).
    +log :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                            (Data.Complex.Complex Float),
    +                                            Data.Word.Word16, Double,
    +                                            Float] t) => Tensor v1 t -- ^ __x__
    +       -> Tensor Value t -- ^ __y__
    +log x | eqLengthGuard [] =
    +    buildOp (opDef "Log"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Scatter the data from the input value into specific TensorArray elements.
    +--
    +-- `indices` must be a vector; its length must match the first dim of `value`.
    +tensorArrayScatter :: forall v1 v2 v3 v4 t . (TensorType t) =>
    +                      Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
    +                      -> Tensor v2 Data.Int.Int32 -- ^ __indices__: The locations at which to write the tensor elements.
    +                      -> Tensor v3 t -- ^ __value__: The concatenated tensor to write to the TensorArray.
    +                      -> Tensor v4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
    +                      -> Tensor Value Float -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
    +tensorArrayScatter handle indices value flow_in | eqLengthGuard [] =
    +    buildOp (opDef "TensorArrayScatter"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        handle indices value flow_in
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The handle to a TensorArray."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "The locations at which to write the tensor elements."
    +  name: "indices"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "The concatenated tensor to write to the TensorArray."
    +  name: "value"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_in"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_out"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Returns the rank of a tensor.
    +--
    +-- This operation returns an integer representing the rank of `input`.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    +-- # shape of tensor 't' is [2, 2, 3]
    +-- rank(t) ==> 3
    +-- ```
    +-- 
    +-- **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
    +-- of a tensor is the number of indices required to uniquely select each element
    +-- of the tensor. Rank is also known as "order", "degree", or "ndims."
    +rank :: forall v1 t . (TensorType t) => Tensor v1 t -- ^ __input__
    +        -> Tensor Value Data.Int.Int32 -- ^ __output__
    +rank input | eqLengthGuard [] =
    +    buildOp (opDef "Rank"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type: DT_INT32 }
    +-}
    +
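    +-- A minimal usage sketch, assuming a @t :: Tensor Value Float@ built
    +-- elsewhere:
    +--
    +-- > r :: Tensor Value Data.Int.Int32
    +-- > r = rank t
    +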
    +-- | Return a tensor with the same shape and contents as the input tensor or value.
    +
    +identity :: forall v1 t . (TensorType t) => Tensor v1 t -- ^ __input__
    +            -> Tensor Value t -- ^ __output__
    +identity input | eqLengthGuard [] =
    +    buildOp (opDef "Identity"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Adjust the contrast of one or more images.
    +--
    +-- `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
    +-- interpreted as `[height, width, channels]`.  The other dimensions only
    +-- represent a collection of images, such as `[batch, height, width, channels]`.
    +-- 
    +-- Contrast is adjusted independently for each channel of each image.
    +-- 
    +-- For each channel, the Op first computes the mean of the image pixels in the
    +-- channel and then adjusts each component of each pixel to
    +-- `(x - mean) * contrast_factor + mean`.
    +adjustContrastv2 :: Tensor v1 Float -- ^ __images__: Images to adjust.  At least 3-D.
    +                    -> Tensor v2 Float -- ^ __contrast_factor__: A float multiplier for adjusting contrast.
    +                    -> Tensor Value Float -- ^ __output__: The contrast-adjusted image or images.
    +adjustContrastv2 images contrast_factor | eqLengthGuard [] =
    +    buildOp (opDef "AdjustContrastv2")
    +        images contrast_factor
    +{-
    +input_arg {
    +  description: "Images to adjust.  At least 3-D."
    +  name: "images"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A float multiplier for adjusting contrast."
    +  name: "contrast_factor"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "The contrast-adjusted image or images."
    +  name: "output"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Sparse update of entries in '*var' and '*accum' according to the FOBOS algorithm.
    +--
    +-- That is, for rows for which we have grad, we update var and accum as follows:
    +-- accum += grad * grad
    +-- prox_v = var
    +-- prox_v -= lr * grad * (1 / sqrt(accum))
    +-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
    +sparseApplyProximalAdagrad :: forall v1 v2 v3 v4 v5 v6 v7 t
    +                              tindices . (TensorType t,
    +                                          OneOf '[(Data.Complex.Complex Double),
    +                                                  (Data.Complex.Complex Float),
    +                                                  Data.Int.Int16,
    +                                                  Data.Int.Int32,
    +                                                  Data.Int.Int64, Data.Int.Int8,
    +                                                  Data.Word.Word16,
    +                                                  Data.Word.Word8, Double,
    +                                                  Float] t, TensorType tindices,
    +                                          OneOf '[Data.Int.Int32,
    +                                                  Data.Int.Int64] tindices) =>
    +                              Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                              -> Tensor v2 t -- ^ __accum__: Should be from a Variable().
    +                              -> Tensor v3 t -- ^ __lr__: Learning rate. Must be a scalar.
    +                              -> Tensor v4 t -- ^ __l1__: L1 regularization. Must be a scalar.
    +                              -> Tensor v5 t -- ^ __l2__: L2 regularization. Must be a scalar.
    +                              -> Tensor v6 t -- ^ __grad__: The gradient.
    +                              -> Tensor v7 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
    +                              -> Tensor Value t -- ^ __out__: Same as "var".
    +sparseApplyProximalAdagrad var accum lr l1 l2 grad indices | eqLengthGuard [] =
    +    buildOp (opDef "SparseApplyProximalAdagrad"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        var accum lr l1 l2 grad indices
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Learning rate. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L1 regularization. Must be a scalar."
    +  name: "l1"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L2 regularization. Must be a scalar."
    +  name: "l2"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "A vector of indices into the first dimension of var and accum."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Gather slices from `params` according to `indices`.
    +--
    +-- `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
    +-- Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
    +-- 
    +--     # Scalar indices
    +--     output[:, ..., :] = params[indices, :, ... :]
    +-- 
    +--     # Vector indices
    +--     output[i, :, ..., :] = params[indices[i], :, ... :]
    +-- 
    +--     # Higher rank indices
    +--     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
    +-- 
    +-- If `indices` is a permutation and `len(indices) == params.shape[0]` then
    +-- this operation will permute `params` accordingly.
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/Gather.png" alt>
    +-- </div>
    +gather :: forall v1 v2 tindices tparams . (TensorType tindices,
    +                                           OneOf '[Data.Int.Int32,
    +                                                   Data.Int.Int64] tindices,
    +                                           TensorType tparams) =>
    +          Tensor v1 tparams -- ^ __params__
    +          -> Tensor v2 tindices -- ^ __indices__
    +          -> Tensor Value tparams -- ^ __output__
    +gather params indices | eqLengthGuard [] =
    +    buildOp (opDef "Gather"
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices)
    +             & opAttr "Tparams" .~ tensorType (undefined :: tparams))
    +        params indices
    +{-
    +attr {
    +  default_value { b: true } name: "validate_indices" type: "bool"
    +}
    +attr { name: "Tparams" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +input_arg { name: "params" type_attr: "Tparams" }
    +input_arg { name: "indices" type_attr: "Tindices" }
    +output_arg { name: "output" type_attr: "Tparams" }
    +-}
    +
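    +-- A minimal usage sketch, assuming @params :: Tensor Value Float@ and
    +-- @indices :: Tensor Value Data.Int.Int32@ built elsewhere:
    +--
    +-- > rows :: Tensor Value Float
    +-- > rows = gather params indices
    +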
    +-- | Checks whether a tensor has been initialized.
    +--
    +-- Outputs a boolean scalar indicating whether the tensor has been initialized.
    +isVariableInitialized :: forall v1 dtype . (TensorType dtype) =>
    +                         Tensor v1 dtype -- ^ __ref__: Should be from a `Variable` node. May be uninitialized.
    +                         -> Tensor Value Bool -- ^ __is_initialized__
    +isVariableInitialized ref | eqLengthGuard [] =
    +    buildOp (opDef "IsVariableInitialized"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        ref
    +{-
    +attr {
    +  description: "The type of elements in the variable tensor."
    +  name: "dtype"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Should be from a `Variable` node. May be uninitialized."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "dtype"
    +}
    +output_arg { name: "is_initialized" type: DT_BOOL }
    +-}
    +
    +-- | Concatenates tensors along one dimension.
    +
    +concat :: forall v1 v2 t . (TensorType t) =>
    +          Tensor v1 Data.Int.Int32 -- ^ __concat_dim__: 0-D.  The dimension along which to concatenate.  Must be in the
    +                                   -- range [0, rank(values)).
    +          -> [Tensor v2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match,
    +                           -- and their sizes must match in all dimensions except `concat_dim`.
    +          -> Tensor Value t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the
    +          -- `concat_dim` dimension.  This tensor's shape matches that of `values` except
    +          -- in `concat_dim` where it has the sum of the sizes.
    +concat concat_dim values | eqLengthGuard [("N", [("values", length values)])] =
    +    buildOp (opDef "Concat"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "N" .~ (fromIntegral (length values) :: Int64))
    +        concat_dim values
    +{-
    +attr { has_minimum: true minimum: 2 name: "N" type: "int" }
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [0, rank(values))."
    +  name: "concat_dim"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
    +  name: "values"
    +  number_attr: "N"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
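    +-- A minimal usage sketch, assuming a 0-D dimension-index tensor
    +-- @dim :: Tensor Value Data.Int.Int32@ and compatible @a@ and
    +-- @b :: Tensor Value Float@ built elsewhere (this @concat@ shadows the
    +-- Prelude's):
    +--
    +-- > c :: Tensor Value Float
    +-- > c = concat dim [a, b]
    +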
    +-- | Outputs random integers from a uniform distribution.
    +--
    +-- The generated values are uniform integers in the range `[minval, maxval)`.
    +-- The lower bound `minval` is included in the range, while the upper bound
    +-- `maxval` is excluded.
    +-- 
    +-- The random integers are slightly biased unless `maxval - minval` is an exact
    +-- power of two.  The bias is small for values of `maxval - minval` significantly
    +-- smaller than the range of the output (either `2^32` or `2^64`).
    +randomUniformInt :: forall v1 v2 v3 t tout . (TensorType t,
    +                                              OneOf '[Data.Int.Int32,
    +                                                      Data.Int.Int64] t,
    +                                              TensorType tout,
    +                                              OneOf '[Data.Int.Int32,
    +                                                      Data.Int.Int64] tout) =>
    +                    Tensor v1 t -- ^ __shape__: The shape of the output tensor.
    +                    -> Tensor v2 tout -- ^ __minval__: 0-D.  Inclusive lower bound on the generated integers.
    +                    -> Tensor v3 tout -- ^ __maxval__: 0-D.  Exclusive upper bound on the generated integers.
    +                    -> Tensor Value tout -- ^ __output__: A tensor of the specified shape filled with uniform random integers.
    +randomUniformInt shape minval maxval | eqLengthGuard [] =
    +    buildOp (opDef "RandomUniformInt"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tout" .~ tensorType (undefined :: tout))
    +        shape minval maxval
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tout"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The shape of the output tensor."
    +  name: "shape"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "0-D.  Inclusive lower bound on the generated integers."
    +  name: "minval"
    +  type_attr: "Tout"
    +}
    +input_arg {
    +  description: "0-D.  Exclusive upper bound on the generated integers."
    +  name: "maxval"
    +  type_attr: "Tout"
    +}
    +output_arg {
    +  description: "A tensor of the specified shape filled with uniform random integers."
    +  name: "output"
    +  type_attr: "Tout"
    +}
    +-}
    +
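    +-- A minimal usage sketch, assuming a shape tensor
    +-- @shp :: Tensor Value Data.Int.Int32@ and scalar bounds
    +-- @lo@, @hi :: Tensor Value Data.Int.Int64@ built elsewhere:
    +--
    +-- > xs :: Tensor Value Data.Int.Int64
    +-- > xs = randomUniformInt shp lo hi
    +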
    +-- | Stops gradient computation.
    +--
    +-- When executed in a graph, this op outputs its input tensor as-is.
    +-- 
    +-- When building ops to compute gradients, this op prevents the contribution
    +-- of its inputs from being taken into account.  Normally, the gradient
    +-- generator adds ops to a graph to compute the derivatives of a specified
    +-- 'loss' by recursively finding the inputs that contributed to its computation.
    +-- If you insert this op in the graph, its inputs are masked from the gradient
    +-- generator.  They are not
    +-- taken into account for computing gradients.
    +-- 
    +-- This is useful any time you want to compute a value with TensorFlow but need
    +-- to pretend that the value was a constant. Some examples include:
    +-- 
    +-- *  The *EM* algorithm where the *M-step* should not involve backpropagation
    +--    through the output of the *E-step*.
    +-- *  Contrastive divergence training of Boltzmann machines where, when
    +--    differentiating the energy function, the training must not backpropagate
    +--    through the graph that generated the samples from the model.
    +-- *  Adversarial training, where no backprop should happen through the adversarial
    +--    example generation process.
    +stopGradient :: forall v1 t . (TensorType t) => Tensor v1 t -- ^ __input__
    +                -> Tensor Value t -- ^ __output__
    +stopGradient input | eqLengthGuard [] =
    +    buildOp (opDef "StopGradient"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
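    +-- A minimal usage sketch, assuming an @embedding :: Tensor Value Float@
    +-- built elsewhere; downstream ops see the same values, but no gradient
    +-- flows back through @frozen@:
    +--
    +-- > frozen :: Tensor Value Float
    +-- > frozen = stopGradient embedding
    +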
    +-- | Performs average pooling on the input.
    +--
    +-- Each entry in `output` is the mean of the corresponding size `ksize`
    +-- window in `value`.
    +avgPool :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                                Float] t) =>
    +           Tensor v1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
    +           -> Tensor Value t -- ^ __output__: The average pooled output tensor.
    +avgPool value | eqLengthGuard [] =
    +    buildOp (opDef "AvgPool"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        value
    +{-
    +attr {
    +  description: "The size of the sliding window for each dimension of `value`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "ksize"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of `value`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values { list { s: "NHWC" s: "NCHW" } }
    +  default_value { s: "NHWC" }
    +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    +  name: "data_format"
    +  type: "string"
    +}
    +attr {
    +  allowed_values {
    +    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The average pooled output tensor."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | A Reader that outputs the entire contents of a file as a value.
    +--
    +-- To use, enqueue filenames in a Queue.  The output of ReaderRead will
    +-- be a filename (key) and the contents of that file (value).
    +wholeFileReader :: Tensor Value Data.ByteString.ByteString -- ^ __reader_handle__: The handle to reference the Reader.
    +wholeFileReader  | eqLengthGuard [] =
    +    buildOp (opDef "WholeFileReader")
    +        
    +{-
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
    +  name: "shared_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "The handle to reference the Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Forwards `data` to the output port determined by `pred`.
    +--
    +-- If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
    +-- the data goes to `output_false`.
    +-- 
    +-- See also `RefSwitch` and `Merge`.
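+--
+-- A usage sketch (illustrative; @x@ is assumed to be any tensor and @p@ a
+-- scalar boolean tensor built elsewhere):
+--
+-- > -- Only one of the two outputs carries data at run time.
+-- > (whenFalse, whenTrue) = switch x p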
    +switch :: forall v1 v2 t . (TensorType t) =>
    +          Tensor v1 t -- ^ __data__: The tensor to be forwarded to the appropriate output.
    +          -> Tensor v2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data.
    +          -> (Tensor Value t, Tensor Value t)
    +          -- ^ (__output_false__, __output_true__)
    +          --
    +          -- * __output_false__: If `pred` is false, data will be forwarded to this output.
    +          --
    +          -- * __output_true__: If `pred` is true, data will be forwarded to this output.
    +switch data' pred | eqLengthGuard [] =
    +    buildOp (opDef "Switch"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        data' pred
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The tensor to be forwarded to the appropriate output."
    +  name: "data"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A scalar that specifies which output port will receive data."
    +  name: "pred"
    +  type: DT_BOOL
    +}
    +output_arg {
    +  description: "If `pred` is false, data will be forwarded to this output."
    +  name: "output_false"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "If `pred` is true, data will be forwarded to this output."
    +  name: "output_true"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Outputs random values from a normal distribution.
    +--
    +-- The generated values will have mean 0 and standard deviation 1.
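+--
+-- A usage sketch (illustrative; @shp@ is assumed to be an int32 shape
+-- tensor built elsewhere).  The annotation fixes the @dtype@ attr:
+--
+-- > noise = randomStandardNormal shp :: Tensor Value Float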
    +randomStandardNormal :: forall v1 t dtype . (TensorType t,
    +                                             OneOf '[Data.Int.Int32,
    +                                                     Data.Int.Int64] t,
    +                                             TensorType dtype,
    +                                             OneOf '[Data.Word.Word16, Double,
    +                                                     Float] dtype) =>
    +                        Tensor v1 t -- ^ __shape__: The shape of the output tensor.
    +                        -> Tensor Value dtype -- ^ __output__: A tensor of the specified shape filled with random normal values.
    +randomStandardNormal shape | eqLengthGuard [] =
    +    buildOp (opDef "RandomStandardNormal"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        shape
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  description: "The type of the output."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The shape of the output tensor."
    +  name: "shape"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "A tensor of the specified shape filled with random normal values."
    +  name: "output"
    +  type_attr: "dtype"
    +}
    +-}
    +
    +-- | Computes sigmoid of `x` element-wise.
    +--
    +-- Specifically, `y = 1 / (1 + exp(-x))`.
    +sigmoid :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                                (Data.Complex.Complex Float),
    +                                                Data.Word.Word16, Double,
    +                                                Float] t) =>
    +           Tensor v1 t -- ^ __x__
    +           -> Tensor Value t -- ^ __y__
    +sigmoid x | eqLengthGuard [] =
    +    buildOp (opDef "Sigmoid"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Generate a single randomly distorted bounding box for an image.
    +--
    +-- Bounding box annotations are often supplied in addition to ground-truth labels
    +-- in image recognition or object localization tasks. A common technique for
    +-- training such a system is to randomly distort an image while preserving
    +-- its content, i.e. *data augmentation*. This Op outputs a randomly distorted
    +-- localization of an object, i.e. bounding box, given an `image_size`,
    +-- `bounding_boxes` and a series of constraints.
    +-- 
    +-- The output of this Op is a single bounding box that may be used to crop the
    +-- original image. The output is returned as 3 tensors: `begin`, `size` and
    +-- `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
    +-- image. The latter may be supplied to `tf.image.draw_bounding_box` to visualize
    +-- what the bounding box looks like.
    +-- 
    +-- Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
    +-- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
    +-- height of the underlying image.
    +-- 
    +-- For example,
    +-- 
    +--     # Generate a single distorted bounding box.
    +--     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
    +--         tf.shape(image),
    +--         bounding_boxes=bounding_boxes)
    +-- 
    +--     # Draw the bounding box in an image summary.
    +--     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
    +--                                                   bbox_for_draw)
    +--     tf.image_summary('images_with_box', image_with_box)
    +-- 
    +--     # Employ the bounding box to distort the image.
    +--     distorted_image = tf.slice(image, begin, size)
    +-- 
    +-- Note that if no bounding box information is available, setting
    +-- `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
    +-- bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
    +-- false and no bounding boxes are supplied, an error is raised.
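+--
+-- In this binding the three outputs come back as a tuple (a sketch;
+-- @imageSize@ and @boundingBoxes@ are assumed built elsewhere):
+--
+-- > (begin, size, bboxes) = sampleDistortedBoundingBox imageSize boundingBoxes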
    +sampleDistortedBoundingBox :: forall v1 v2 t . (TensorType t,
    +                                                OneOf '[Data.Int.Int16,
    +                                                        Data.Int.Int32,
    +                                                        Data.Int.Int64,
    +                                                        Data.Int.Int8,
    +                                                        Data.Word.Word8] t) =>
    +                              Tensor v1 t -- ^ __image_size__: 1-D, containing `[height, width, channels]`.
    +                              -> Tensor v2 Float -- ^ __bounding_boxes__: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
    +                                                 -- associated with the image.
    +                              -> (Tensor Value t, Tensor Value t,
    +                                  Tensor Value Float)
    +                              -- ^ (__begin__, __size__, __bboxes__)
    +                              --
    +                              -- * __begin__: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
    +                              -- `tf.slice`.
    +                              --
    +                              -- * __size__: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
    +                              -- `tf.slice`.
    +                              --
    +                              -- * __bboxes__: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
    +                              -- Provide as input to `tf.image.draw_bounding_boxes`.
    +sampleDistortedBoundingBox image_size bounding_boxes | eqLengthGuard [] =
    +    buildOp (opDef "SampleDistortedBoundingBox"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        image_size bounding_boxes
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`.  Otherwise, it is seeded by a random\nseed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  default_value { f: 0.1 }
    +  description: "The cropped area of the image must contain at least this\nfraction of any bounding box supplied."
    +  name: "min_object_covered"
    +  type: "float"
    +}
    +attr {
    +  default_value { list { f: 0.75 f: 1.33 } }
    +  description: "The cropped area of the image must have an aspect ratio =\nwidth / height within this range."
    +  name: "aspect_ratio_range"
    +  type: "list(float)"
    +}
    +attr {
    +  default_value { list { f: 5.0e-2 f: 1.0 } }
    +  description: "The cropped area of the image must contain a fraction of the\nsupplied image within in this range."
    +  name: "area_range"
    +  type: "list(float)"
    +}
    +attr {
    +  default_value { i: 100 }
    +  description: "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage."
    +  name: "max_attempts"
    +  type: "int"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error."
    +  name: "use_image_if_no_bounding_boxes"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "1-D, containing `[height, width, channels]`."
    +  name: "image_size"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image."
    +  name: "bounding_boxes"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`."
    +  name: "begin"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`."
    +  name: "size"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`."
    +  name: "bboxes"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Returns the truth value of (x > y) element-wise.
    +--
    +-- *NOTE*: `Greater` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
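+--
+-- For example, an elementwise positivity mask, combining `greater` with
+-- `zerosLike` from this module (a sketch; @x@ is assumed built elsewhere):
+--
+-- > positive = greater x (zerosLike x)   -- :: Tensor Value Bool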
    +greater :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                   Data.Int.Int32,
    +                                                   Data.Int.Int64,
    +                                                   Data.Int.Int8,
    +                                                   Data.Word.Word16,
    +                                                   Data.Word.Word8, Double,
    +                                                   Float] t) =>
    +           Tensor v1 t -- ^ __x__
    +           -> Tensor v2 t -- ^ __y__
    +           -> Tensor Value Bool -- ^ __z__
    +greater x y | eqLengthGuard [] =
    +    buildOp (opDef "Greater"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type: DT_BOOL }
    +-}
    +
    +-- | Makes its input available to the next iteration.
    +
    +refNextIteration :: forall v1 t . (TensorType t) =>
    +                    Tensor v1 t -- ^ __data__: The tensor to be made available to the next iteration.
    +                    -> Tensor Value t -- ^ __output__: The same tensor as `data`.
    +refNextIteration data' | eqLengthGuard [] =
    +    buildOp (opDef "RefNextIteration"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        data'
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The tensor to be made available to the next iteration."
    +  is_ref: true
    +  name: "data"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The same tensor as `data`."
    +  is_ref: true
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | SpaceToDepth for tensors of type T.
    +--
+-- Rearranges blocks of spatial data into depth. More specifically,
    +-- this op outputs a copy of the input tensor where values from the `height`
    +-- and `width` dimensions are moved to the `depth` dimension.
    +-- The attr `block_size` indicates the input block size and how the data is moved.
    +-- 
+--   * Non-overlapping blocks of size `block_size x block_size` are rearranged
    +--     into depth at each location.
    +--   * The depth of the output tensor is `input_depth * block_size * block_size`.
    +--   * The input tensor's height and width must be divisible by block_size.
    +-- 
    +-- That is, assuming the input is in the shape:
    +-- `[batch, height, width, depth]`,
    +-- the shape of the output will be:
    +-- `[batch, height/block_size, width/block_size, depth*block_size*block_size]`
    +-- 
    +-- This operation requires that the input tensor be of rank 4, and that
    +-- `block_size` be >=1 and a divisor of both the input `height` and `width`.
    +-- 
    +-- This operation is useful for resizing the activations between convolutions
    +-- (but keeping all data), e.g. instead of pooling. It is also useful for training
    +-- purely convolutional models.
    +-- 
    +-- For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:
    +-- 
    +-- ```prettyprint
    +-- x = [[[[1], [2]],
    +--       [[3], [4]]]]
    +-- ```
    +-- 
    +-- This operation will output a tensor of shape `[1, 1, 1, 4]`:
    +-- 
    +-- ```prettyprint
    +-- [[[[1, 2, 3, 4]]]]
    +-- ```
    +-- 
+-- Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`;
+-- the corresponding output will have a single element (i.e. width and height are
+-- both 1) and a depth of 4 channels (1 * block_size * block_size).
    +-- The output element shape is `[1, 1, 4]`.
    +-- 
+-- For example, given an input tensor with larger depth, here of shape `[1, 2, 2, 3]`:
    +-- 
    +-- ```prettyprint
    +-- x = [[[[1, 2, 3], [4, 5, 6]],
    +--       [[7, 8, 9], [10, 11, 12]]]]
    +-- ```
    +-- 
    +-- This operation, for block_size of 2, will return the following tensor of shape
+-- `[1, 1, 1, 12]`:
    +-- 
    +-- ```prettyprint
    +-- [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
    +-- ```
    +-- 
+-- Similarly, for the following input of shape `[1, 4, 4, 1]` and a block size of 2:
    +-- 
    +-- ```prettyprint
    +-- x = [[[[1],   [2],  [5],  [6]],
    +--       [[3],   [4],  [7],  [8]],
    +--       [[9],  [10], [13],  [14]],
    +--       [[11], [12], [15],  [16]]]]
    +-- ```
    +-- 
+-- the operator will return the following tensor of shape `[1, 2, 2, 4]`:
    +-- 
    +-- ```prettyprint
    +-- x = [[[[1, 2, 3, 4],
    +--        [5, 6, 7, 8]],
    +--       [[9, 10, 11, 12],
    +--        [13, 14, 15, 16]]]]
    +-- ```
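+--
+-- In this binding, `block_size` is the first argument (a sketch; @x@ is
+-- assumed to be a rank-4 tensor such as the examples above):
+--
+-- > y = spaceToDepth 2 x   -- e.g. shape [1, 4, 4, 1] becomes [1, 2, 2, 4]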
    +spaceToDepth :: forall v1 t . (TensorType t) =>
    +                Data.Int.Int64 -- ^ __block_size__: The size of the spatial block.
    +                -> Tensor v1 t -- ^ __input__
    +                -> Tensor Value t -- ^ __output__
    +spaceToDepth block_size input | eqLengthGuard [] =
    +    buildOp (opDef "SpaceToDepth"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "block_size" .~ block_size)
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "The size of the spatial block."
    +  has_minimum: true
    +  minimum: 2
    +  name: "block_size"
    +  type: "int"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Does nothing. Serves as a control trigger for scheduling.
    +--
    +-- Only useful as a placeholder for control edges.
    +controlTrigger :: ControlNode
    +controlTrigger  | eqLengthGuard [] =
    +    buildOp (opDef "ControlTrigger")
    +        
    +{-
    +
    +-}
    +
    +-- | Divides a variable reference by sparse updates.
    +--
    +-- This operation computes
    +-- 
    +--     # Scalar indices
    +--     ref[indices, ...] /= updates[...]
    +-- 
    +--     # Vector indices (for each i)
    +--     ref[indices[i], ...] /= updates[i, ...]
    +-- 
    +--     # High rank indices (for each i, ..., j)
    +--     ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
    +-- 
    +-- This operation outputs `ref` after the update is done.
+-- This makes it easier to chain operations that need to use the updated value.
    +-- 
    +-- Duplicate entries are handled correctly: if multiple `indices` reference
    +-- the same location, their contributions divide.
    +-- 
    +-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
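+--
+-- A usage sketch (illustrative; @ref@ should come from a `Variable` node,
+-- and @indices@ and @updates@ are assumed built elsewhere):
+--
+-- > outputRef = scatterDiv ref indices updates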
    +scatterDiv :: forall v1 v2 v3 t tindices . (TensorType t,
    +                                            OneOf '[(Data.Complex.Complex Double),
    +                                                    (Data.Complex.Complex Float),
    +                                                    Data.Int.Int16,
    +                                                    Data.Int.Int32,
    +                                                    Data.Int.Int64,
    +                                                    Data.Int.Int8,
    +                                                    Data.Word.Word16,
    +                                                    Data.Word.Word8, Double,
    +                                                    Float] t,
    +                                            TensorType tindices,
    +                                            OneOf '[Data.Int.Int32,
    +                                                    Data.Int.Int64] tindices) =>
    +              Tensor v1 t -- ^ __ref__: Should be from a `Variable` node.
    +              -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
    +              -> Tensor v3 t -- ^ __updates__: A tensor of values that `ref` is divided by.
    +              -> Tensor Value t -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
    +              -- to use the updated values after the update is done.
    +scatterDiv ref indices updates | eqLengthGuard [] =
    +    buildOp (opDef "ScatterDiv"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        ref indices updates
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a `Variable` node."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A tensor of indices into the first dimension of `ref`."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "A tensor of values that `ref` is divided by."
    +  name: "updates"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
    +  is_ref: true
    +  name: "output_ref"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Copy Op.
    +--
    +-- Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
    +-- device on which the tensor is allocated.
    +-- 
+-- Unlike the CopyHost Op, this op does not have a HostMemory constraint on its
    +-- input or output.
    +copy :: forall v1 t . (TensorType t) =>
    +        Tensor v1 t -- ^ __input__: Input tensor.
    +        -> Tensor Value t -- ^ __output__: Output tensor, deep-copied from input.
    +copy input | eqLengthGuard [] =
    +    buildOp (opDef "Copy"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  default_value { s: "" }
    +  description: "The name of the input tensor."
    +  name: "tensor_name"
    +  type: "string"
    +}
    +input_arg {
    +  description: "Input tensor." name: "input" type_attr: "T"
    +}
    +output_arg {
    +  description: "Output tensor, deep-copied from input."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
    +
    +cropAndResizeGradBoxes :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                                  OneOf '[Data.Int.Int16,
    +                                                          Data.Int.Int32,
    +                                                          Data.Int.Int64,
    +                                                          Data.Int.Int8,
    +                                                          Data.Word.Word16,
    +                                                          Data.Word.Word8,
    +                                                          Double, Float] t) =>
    +                          Tensor v1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
    +                          -> Tensor v2 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
    +                                         -- Both `image_height` and `image_width` need to be positive.
    +                          -> Tensor v3 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
    +                                             -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
    +                                             -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
    +                                             -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
    +                                             -- `[0, 1]` interval of normalized image height is mapped to
+                                             -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
    +                                             -- which case the sampled crop is an up-down flipped version of the original
    +                                             -- image. The width dimension is treated similarly. Normalized coordinates
    +                                             -- outside the `[0, 1]` range are allowed, in which case we use
    +                                             -- `extrapolation_value` to extrapolate the input image values.
    +                          -> Tensor v4 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
    +                                                      -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
    +                          -> Tensor Value Float -- ^ __output__: A 2-D tensor of shape `[num_boxes, 4]`.
    +cropAndResizeGradBoxes grads image boxes box_ind | eqLengthGuard [] =
    +    buildOp (opDef "CropAndResizeGradBoxes"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        grads image boxes box_ind
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { s: "bilinear" } }
    +  default_value { s: "bilinear" }
    +  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
    +  name: "method"
    +  type: "string"
    +}
    +input_arg {
    +  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
    +  name: "grads"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive."
    +  name: "image"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
    +  name: "boxes"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
    +  name: "box_ind"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "A 2-D tensor of shape `[num_boxes, 4]`."
    +  name: "output"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Computes the mean along sparse segments of a tensor.
    +--
    +-- Read [the section on
    +-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
    +-- of segments.
    +-- 
    +-- Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
    +-- dimension, selecting a subset of dimension 0, specified by `indices`.
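+--
+-- A usage sketch (illustrative; the three inputs are assumed built
+-- elsewhere, with @segmentIds@ a sorted 1-D int32 tensor):
+--
+-- > means = sparseSegmentMean data' indices segmentIds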
    +sparseSegmentMean :: forall v1 v2 v3 t tidx . (TensorType t, OneOf '[Double,
    +                                                                     Float] t,
    +                                               TensorType tidx,
    +                                               OneOf '[Data.Int.Int32,
    +                                                       Data.Int.Int64] tidx) =>
    +                     Tensor v1 t -- ^ __data__
    +                     -> Tensor v2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
    +                     -> Tensor v3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
    +                     -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
    +                     -- has size `k`, the number of segments.
    +sparseSegmentMean data' indices segment_ids | eqLengthGuard [] =
    +    buildOp (opDef "SparseSegmentMean"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        data' indices segment_ids
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg { name: "data" type_attr: "T" }
    +input_arg {
    +  description: "A 1-D tensor. Has same rank as `segment_ids`."
    +  name: "indices"
    +  type_attr: "Tidx"
    +}
    +input_arg {
    +  description: "A 1-D tensor. Values should be sorted and can be repeated."
    +  name: "segment_ids"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Update 'ref' by assigning 'value' to it.
    +--
    +-- This operation outputs "ref" after the assignment is done.
    +-- This makes it easier to chain operations that need to use the reset value.
    +assign :: forall v1 v2 t . (TensorType t) =>
    +          Tensor v1 t -- ^ __ref__: Should be from a `Variable` node. May be uninitialized.
    +          -> Tensor v2 t -- ^ __value__: The value to be assigned to the variable.
    +          -> Tensor Value t -- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want
    +          -- to use the new value after the variable has been reset.
    +assign ref value | eqLengthGuard [] =
    +    buildOp (opDef "Assign"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        ref value
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  default_value { b: true }
    +  description: "If true, the operation will validate that the shape\nof \'value\' matches the shape of the Tensor being assigned to.  If false,\n\'ref\' will take on the shape of \'value\'."
    +  name: "validate_shape"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: true }
    +  description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a `Variable` node. May be uninitialized."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The value to be assigned to the variable."
    +  name: "value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been reset."
    +  is_ref: true
    +  name: "output_ref"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Restores a tensor from checkpoint files.
    +--
    +-- Reads a tensor stored in one or several files. If there are several files (for
    +-- instance because a tensor was saved as slices), `file_pattern` may contain
    +-- wildcard symbols (`*` and `?`) in the filename portion only, not in the
    +-- directory portion.
    +-- 
    +-- If a `file_pattern` matches several files, `preferred_shard` can be used to hint
    +-- in which file the requested tensor is likely to be found. This op will first
    +-- open the file at index `preferred_shard` in the list of matching files and try
+-- to restore tensors from that file.  Only if some tensors or tensor slices are
+-- not found in that first file does the Op open all the files. Setting
    +-- `preferred_shard` to match the value passed as the `shard` input
    +-- of a matching `Save` Op may speed up Restore.  This attribute only affects
    +-- performance, not correctness.  The default value -1 means files are processed in
    +-- order.
    +-- 
    +-- See also `RestoreSlice`.
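+--
+-- The element type is picked by the caller via the result type (a sketch;
+-- @filePattern@ and @tensorName@ are assumed scalar string tensors):
+--
+-- > weights = restore filePattern tensorName :: Tensor Value Float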
    +restore :: forall v1 v2 dt . (TensorType dt) =>
    +           Tensor v1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from
    +                                                -- which we read the tensor.
    +           -> Tensor v2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be
    +                                                   -- restored.
    +           -> Tensor Value dt -- ^ __tensor__: The restored tensor.
    +restore file_pattern tensor_name | eqLengthGuard [] =
    +    buildOp (opDef "Restore"
    +             & opAttr "dt" .~ tensorType (undefined :: dt))
    +        file_pattern tensor_name
    +{-
    +attr {
    +  description: "The type of the tensor to be restored."
    +  name: "dt"
    +  type: "type"
    +}
    +attr {
    +  default_value { i: -1 }
    +  description: "Index of file to open first if multiple files match\n`file_pattern`."
    +  name: "preferred_shard"
    +  type: "int"
    +}
    +input_arg {
    +  description: "Must have a single element. The pattern of the files from\nwhich we read the tensor."
    +  name: "file_pattern"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Must have a single element. The name of the tensor to be\nrestored."
    +  name: "tensor_name"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "The restored tensor." name: "tensor" type_attr: "dt"
    +}
    +-}
    +
    +-- | Computes gradients of the maxpooling function.
    +
    +maxPoolGradWithArgmax :: forall v1 v2 v3 t targmax . (TensorType t,
    +                                                      OneOf '[Data.Word.Word16,
    +                                                              Float] t,
    +                                                      TensorType targmax,
    +                                                      OneOf '[Data.Int.Int32,
    +                                                              Data.Int.Int64] targmax) =>
    +                         Tensor v1 t -- ^ __input__: The original input.
    +                         -> Tensor v2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
    +                                        -- output of `max_pool`.
    +                         -> Tensor v3 targmax -- ^ __argmax__: The indices of the maximum values chosen for each output of `max_pool`.
    +                         -> Tensor Value t -- ^ __output__: Gradients w.r.t. the input of `max_pool`.
    +maxPoolGradWithArgmax input grad argmax | eqLengthGuard [] =
    +    buildOp (opDef "MaxPoolGradWithArgmax"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Targmax" .~ tensorType (undefined :: targmax))
    +        input grad argmax
    +{-
    +attr {
    +  description: "The size of the window for each dimension of the input tensor."
    +  has_minimum: true
    +  minimum: 4
    +  name: "ksize"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of the\ninput tensor."
    +  has_minimum: true
    +  minimum: 4
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Targmax"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The original input." name: "input" type_attr: "T"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the\noutput of `max_pool`."
    +  name: "grad"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The indices of the maximum values chosen for each output of `max_pool`."
    +  name: "argmax"
    +  type_attr: "Targmax"
    +}
    +output_arg {
    +  description: "Gradients w.r.t. the input of `max_pool`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Checks a tensor for NaN and Inf values.
    +--
    +-- When run, reports an `InvalidArgument` error if `tensor` has any values
    +-- that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
    +checkNumerics :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                                      Float] t) =>
    +                 Tensor v1 t -- ^ __tensor__
    +                 -> Tensor Value t -- ^ __output__
    +checkNumerics tensor | eqLengthGuard [] =
    +    buildOp (opDef "CheckNumerics"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        tensor
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "Prefix of the error message."
    +  name: "message"
    +  type: "string"
    +}
    +input_arg { name: "tensor" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Returns a tensor of zeros with the same shape and type as x.
    +
    +zerosLike :: forall v1 t . (TensorType t) =>
    +             Tensor v1 t -- ^ __x__: a tensor of type T.
    +             -> Tensor Value t -- ^ __y__: a tensor of the same shape and type as x but filled with zeros.
    +zerosLike x | eqLengthGuard [] =
    +    buildOp (opDef "ZerosLike"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "a tensor of type T." name: "x" type_attr: "T"
    +}
    +output_arg {
    +  description: "a tensor of the same shape and type as x but filled with zeros."
    +  name: "y"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Reads and outputs the entire contents of the input filename.
    +
    +readFile :: Tensor v1 Data.ByteString.ByteString -- ^ __filename__
    +            -> Tensor Value Data.ByteString.ByteString -- ^ __contents__
    +readFile filename | eqLengthGuard [] =
    +    buildOp (opDef "ReadFile")
    +        filename
    +{-
    +input_arg { name: "filename" type: DT_STRING }
    +output_arg { name: "contents" type: DT_STRING }
    +-}
    +
    +-- | Shuffle dimensions of x according to a permutation.
    +--
    +-- The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
    +--   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
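+--
+-- For example, swapping the two axes of a matrix (a sketch; @m@ and an
+-- int32 permutation tensor @perm01@ holding `[1, 0]` are assumed built
+-- elsewhere):
+--
+-- > transposed = transpose m perm01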
    +transpose :: forall v1 v2 t tperm . (TensorType t, TensorType tperm,
    +                                     OneOf '[Data.Int.Int32,
    +                                             Data.Int.Int64] tperm) =>
    +             Tensor v1 t -- ^ __x__
    +             -> Tensor v2 tperm -- ^ __perm__
    +             -> Tensor Value t -- ^ __y__
    +transpose x perm | eqLengthGuard [] =
    +    buildOp (opDef "Transpose"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tperm" .~ tensorType (undefined :: tperm))
    +        x perm
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tperm"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "perm" type_attr: "Tperm" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Transforms a serialized tensorflow.TensorProto proto into a Tensor.
    +
    +parseTensor :: forall v1 out_type . (TensorType out_type) =>
    +               Tensor v1 Data.ByteString.ByteString -- ^ __serialized__: A scalar string containing a serialized TensorProto proto.
    +               -> Tensor Value out_type -- ^ __output__: A Tensor of type `out_type`.
    +parseTensor serialized | eqLengthGuard [] =
    +    buildOp (opDef "ParseTensor"
    +             & opAttr "out_type" .~ tensorType (undefined :: out_type))
    +        serialized
    +{-
    +attr {
    +  description: "The type of the serialized tensor.  The provided type must match the\ntype of the serialized tensor and no implicit conversion will take place."
    +  name: "out_type"
    +  type: "type"
    +}
    +input_arg {
    +  description: "A scalar string containing a serialized TensorProto proto."
    +  name: "serialized"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "A Tensor of type `out_type`."
    +  name: "output"
    +  type_attr: "out_type"
    +}
    +-}
    +
    +-- | Computes acos of x element-wise.
    +
    +acos :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                             (Data.Complex.Complex Float),
    +                                             Data.Int.Int32, Data.Int.Int64,
    +                                             Data.Word.Word16, Double,
    +                                             Float] t) => Tensor v1 t -- ^ __x__
    +        -> Tensor Value t -- ^ __y__
    +acos x | eqLengthGuard [] =
    +    buildOp (opDef "Acos"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Bitcasts a tensor from one type to another without copying data.
    +--
    +-- Given a tensor `input`, this operation returns a tensor that has the same buffer
    +-- data as `input` with datatype `type`.
    +-- 
    +-- If the input datatype `T` is larger than the output datatype `type` then the
    +-- shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
    +-- 
    +-- If `T` is smaller than `type`, the operator requires that the rightmost
    +-- dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
    +-- [..., sizeof(`type`)/sizeof(`T`)] to [...].
    +-- 
    +-- *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
    +-- endian orderings will give different results.
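+--
+-- A worked instance of the shape rule (a sketch; @x@ is assumed to be a
+-- Float tensor, so sizeof(`T`) = 4 and sizeof(`type`) = 1):
+--
+-- > bytes = bitcast x :: Tensor Value Data.Word.Word8
+-- > -- the shape changes from [...] to [..., 4]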
    +bitcast :: forall v1 t type' . (TensorType t,
    +                                OneOf '[(Data.Complex.Complex Double),
    +                                        (Data.Complex.Complex Float),
    +                                        Data.Int.Int16, Data.Int.Int32,
    +                                        Data.Int.Int64, Data.Int.Int8,
    +                                        Data.Word.Word16, Data.Word.Word8,
    +                                        Double, Float] t, TensorType type',
    +                                OneOf '[(Data.Complex.Complex Double),
    +                                        (Data.Complex.Complex Float),
    +                                        Data.Int.Int16, Data.Int.Int32,
    +                                        Data.Int.Int64, Data.Int.Int8,
    +                                        Data.Word.Word16, Data.Word.Word8,
    +                                        Double, Float] type') =>
    +           Tensor v1 t -- ^ __input__
    +           -> Tensor Value type' -- ^ __output__
    +bitcast input | eqLengthGuard [] =
    +    buildOp (opDef "Bitcast"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "type" .~ tensorType (undefined :: type'))
    +        input
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "type"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "type" }
    +-}
    +
    +-- | Replaces the contents of the table with the specified keys and values.
    +--
    +-- The tensor `keys` must be of the same type as the keys of the table.
    +-- The tensor `values` must be of the type of the table values.
    +lookupTableImport :: forall v1 v2 v3 tin tout . (TensorType tin,
    +                                                 TensorType tout) =>
    +                     Tensor v1 Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
    +                     -> Tensor v2 tin -- ^ __keys__: Any shape.  Keys to look up.
    +                     -> Tensor v3 tout -- ^ __values__: Values to associate with keys.
    +                     -> ControlNode
    +lookupTableImport table_handle keys values | eqLengthGuard [] =
    +    buildOp (opDef "LookupTableImport"
    +             & opAttr "Tin" .~ tensorType (undefined :: tin)
    +             & opAttr "Tout" .~ tensorType (undefined :: tout))
    +        table_handle keys values
    +{-
    +attr { name: "Tin" type: "type" }
    +attr { name: "Tout" type: "type" }
    +input_arg {
    +  description: "Handle to the table."
    +  is_ref: true
    +  name: "table_handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Any shape.  Keys to look up."
    +  name: "keys"
    +  type_attr: "Tin"
    +}
    +input_arg {
    +  description: "Values to associate with keys."
    +  name: "values"
    +  type_attr: "Tout"
    +}
    +-}
    +
    +-- | The backward operation for "BiasAdd" on the "bias" tensor.
    +--
    +-- It accumulates all the values from out_backprop into the feature dimension.
    +-- For NHWC data format, the feature dimension is the last. For NCHW data format,
    +-- the feature dimension is the third-to-last.
    +biasAddGrad :: forall v1 t . (TensorType t,
    +                              OneOf '[(Data.Complex.Complex Double),
    +                                      (Data.Complex.Complex Float),
    +                                      Data.Int.Int16, Data.Int.Int32,
    +                                      Data.Int.Int64, Data.Int.Int8,
    +                                      Data.Word.Word16, Data.Word.Word8, Double,
    +                                      Float] t) =>
    +               Tensor v1 t -- ^ __out_backprop__: Any number of dimensions.
    +               -> Tensor Value t -- ^ __output__: 1-D with size the feature dimension of `out_backprop`.
    +biasAddGrad out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "BiasAddGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        out_backprop
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { s: "NHWC" s: "NCHW" } }
    +  default_value { s: "NHWC" }
    +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n    dimension."
    +  name: "data_format"
    +  type: "string"
    +}
    +input_arg {
    +  description: "Any number of dimensions."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1-D with size the feature dimension of `out_backprop`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | 
    +
    +batchSelfAdjointEig :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                       Tensor v1 t -- ^ __input__
    +                       -> Tensor Value t -- ^ __output__
    +batchSelfAdjointEig input | eqLengthGuard [] =
    +    buildOp (opDef "BatchSelfAdjointEig"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Computes the product of elements across dimensions of a tensor.
    +--
    +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
    +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
    +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
    +-- retained with length 1.
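+--
+-- A usage sketch (illustrative; @reductionIndices@ is assumed to be an
+-- int32 tensor naming the dimensions to reduce):
+--
+-- > p = prod input reductionIndices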
    +prod :: forall v1 v2 t tidx . (TensorType t,
    +                               OneOf '[(Data.Complex.Complex Double),
    +                                       (Data.Complex.Complex Float),
    +                                       Data.Int.Int16, Data.Int.Int32,
    +                                       Data.Int.Int64, Data.Int.Int8,
    +                                       Data.Word.Word16, Data.Word.Word8,
    +                                       Double, Float] t, TensorType tidx,
    +                               OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
    +        Tensor v1 t -- ^ __input__: The tensor to reduce.
    +        -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
    +        -> Tensor Value t -- ^ __output__: The reduced tensor.
    +prod input reduction_indices | eqLengthGuard [] =
    +    buildOp (opDef "Prod"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        input reduction_indices
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, retain reduced dimensions with length 1."
    +  name: "keep_dims"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The tensor to reduce." name: "input" type_attr: "T"
    +}
    +input_arg {
    +  description: "The dimensions to reduce."
    +  name: "reduction_indices"
    +  type_attr: "Tidx"
    +}
    +output_arg {
    +  description: "The reduced tensor." name: "output" type_attr: "T"
    +}
    +-}
    +
    +-- | Resize `images` to `size` using bilinear interpolation.
    +--
    +-- Input images can be of different types but output images are always float.
    +resizeBilinear :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                          Data.Int.Int32,
    +                                                          Data.Int.Int64,
    +                                                          Data.Int.Int8,
    +                                                          Data.Word.Word16,
    +                                                          Data.Word.Word8,
    +                                                          Double, Float] t) =>
    +                  Tensor v1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
    +                  -> Tensor v2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
    +                                              -- new size for the images.
    +                  -> Tensor Value Float -- ^ __resized_images__: 4-D with shape
    +                  -- `[batch, new_height, new_width, channels]`.
    +resizeBilinear images size | eqLengthGuard [] =
    +    buildOp (opDef "ResizeBilinear"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        images size
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
    +  name: "align_corners"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "images"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
    +  name: "size"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
    +  name: "resized_images"
    +  type: DT_FLOAT
    +}
    +-}
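    +
    +-- A minimal usage sketch, assuming the companion tensorflow-ops package's
    +-- 'constant :: Shape -> [a] -> Tensor Value a' helper (not defined in this
    +-- module); only 'resizeBilinear' comes from this file.
    +--
    +-- > -- Downscale a batch of uint8 images to 64x64; the output is Float.
    +-- > resized :: Tensor v Data.Word.Word8 -> Tensor Value Float
    +-- > resized images = resizeBilinear images newSize
    +-- >   where newSize = constant (Shape [2]) [64, 64 :: Data.Int.Int32]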
    +
    +-- | Unpack the data from the input value into TensorArray elements.
    +--
    +-- **WARNING: This op is deprecated.**
    +-- 
    +-- Instead of this op, use `TensorArrayScatter` with
    +-- `indices = RangeOp(0, SizeOp(value)[0])`.
    +tensorArrayUnpack :: forall v1 v2 v3 t . (TensorType t) =>
    +                     Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
    +                     -> Tensor v2 t -- ^ __value__: The concatenated tensor to write to the TensorArray.
    +                     -> Tensor v3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
    +                     -> Tensor Value Float -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
    +tensorArrayUnpack handle value flow_in | eqLengthGuard [] =
    +    buildOp (opDef "TensorArrayUnpack"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        handle value flow_in
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The handle to a TensorArray."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "The concatenated tensor to write to the TensorArray."
    +  name: "value"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_in"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_out"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | 
    +
    +batchMatrixDeterminant :: forall v1 t . (TensorType t, OneOf '[Double,
    +                                                               Float] t) =>
    +                          Tensor v1 t -- ^ __input__
    +                          -> Tensor Value t -- ^ __output__
    +batchMatrixDeterminant input | eqLengthGuard [] =
    +    buildOp (opDef "BatchMatrixDeterminant"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Computes the sum of elements across dimensions of a tensor.
    +--
    +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
    +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
    +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
    +-- retained with length 1.
    +sum :: forall v1 v2 t tidx . (TensorType t,
    +                              OneOf '[(Data.Complex.Complex Double),
    +                                      (Data.Complex.Complex Float),
    +                                      Data.Int.Int16, Data.Int.Int32,
    +                                      Data.Int.Int64, Data.Int.Int8,
    +                                      Data.Word.Word16, Data.Word.Word8, Double,
    +                                      Float] t, TensorType tidx,
    +                              OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
    +       Tensor v1 t -- ^ __input__: The tensor to reduce.
    +       -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
    +       -> Tensor Value t -- ^ __output__: The reduced tensor.
    +sum input reduction_indices | eqLengthGuard [] =
    +    buildOp (opDef "Sum"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        input reduction_indices
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, retain reduced dimensions with length 1."
    +  name: "keep_dims"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The tensor to reduce." name: "input" type_attr: "T"
    +}
    +input_arg {
    +  description: "The dimensions to reduce."
    +  name: "reduction_indices"
    +  type_attr: "Tidx"
    +}
    +output_arg {
    +  description: "The reduced tensor." name: "output" type_attr: "T"
    +}
    +-}
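    +
    +-- A usage sketch for the reduction ops ('sum' here; 'prod' and 'mean' follow
    +-- the same pattern). 'constant' is assumed from the companion tensorflow-ops
    +-- package, and this 'sum' shadows 'Prelude.sum'.
    +--
    +-- > -- Reduce a [2, 3] matrix over dimension 0, giving a [3] vector.
    +-- > colSums :: Tensor Value Float
    +-- > colSums = TensorFlow.GenOps.Core.sum m (constant (Shape [1]) [0 :: Data.Int.Int32])
    +-- >   where m = constant (Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]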
    +
    +-- | Compute the inverse 2-dimensional discrete Fourier Transform over the
    +-- inner-most 2 dimensions of `input`.
    +iFFT2D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
    +          -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2
    +          -- dimensions of `input` are replaced with their inverse 2D Fourier Transform.
    +iFFT2D input | eqLengthGuard [] =
    +    buildOp (opDef "IFFT2D")
    +        input
    +{-
    +input_arg {
    +  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
    +}
    +output_arg {
    +  description: "A complex64 tensor of the same shape as `input`. The inner-most 2\ndimensions of `input` are replaced with their inverse 2D Fourier Transform."
    +  name: "output"
    +  type: DT_COMPLEX64
    +}
    +-}
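    +
    +-- A round-trip sketch: up to floating-point error, 'iFFT2D' inverts the
    +-- forward transform (assumed here to be the generated 'fFT2D' wrapper for
    +-- the "FFT2D" op, defined elsewhere in this module).
    +--
    +-- > roundTrip :: Tensor v (Data.Complex.Complex Float)
    +-- >              -> Tensor Value (Data.Complex.Complex Float)
    +-- > roundTrip = iFFT2D . fFT2D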
    +
    +-- | Creates a tensor filled with a scalar value.
    +--
    +-- This operation creates a tensor of shape `dims` and fills it with `value`.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # Output tensor has shape [2, 3].
    +-- fill([2, 3], 9) ==> [[9, 9, 9]
    +--                      [9, 9, 9]]
    +-- ```
    +fill :: forall v1 v2 t . (TensorType t) =>
    +        Tensor v1 Data.Int.Int32 -- ^ __dims__: 1-D. Represents the shape of the output tensor.
    +        -> Tensor v2 t -- ^ __value__: 0-D (scalar). Value to fill the returned tensor.
    +        -> Tensor Value t -- ^ __output__
    +fill dims value | eqLengthGuard [] =
    +    buildOp (opDef "Fill"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        dims value
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "1-D. Represents the shape of the output tensor."
    +  name: "dims"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "0-D (scalar). Value to fill the returned tensor."
    +  name: "value"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
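    +
    +-- A sketch mirroring the prettyprint example above; 'constant' is assumed
    +-- from the companion tensorflow-ops package.
    +--
    +-- > -- A [2, 3] tensor filled with 9.
    +-- > nines :: Tensor Value Data.Int.Int32
    +-- > nines = fill (constant (Shape [2]) [2, 3]) (constant (Shape []) [9])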
    +
    +-- | Generates labels for candidate sampling with a learned unigram distribution.
    +--
    +-- A unigram sampler could use a fixed unigram distribution read from a
    +-- file or passed in as an in-memory array instead of building up the distribution
    +-- from data on the fly. There is also an option to skew the distribution by
    +-- applying a distortion power to the weights.
    +-- 
    +-- The vocabulary file should be in CSV-like format, with the last field
    +-- being the weight associated with the word.
    +-- 
    +-- For each batch, this op picks a single set of sampled candidate labels.
    +-- 
    +-- The advantages of sampling candidates per-batch are simplicity and the
    +-- possibility of efficient dense matrix multiplication. The disadvantage is that
    +-- the sampled candidates must be chosen independently of the context and of the
    +-- true labels.
    +fixedUnigramCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch.
    +                                -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
    +                                -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
    +                                -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
    +                                        -- candidates in a batch are unique. This requires some approximation to
    +                                        -- estimate the post-rejection sampling probabilities.
    +                                -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
    +                                                            -- IDs of the num_true target_classes in the corresponding original label.
    +                                -> (Tensor Value Data.Int.Int64,
    +                                    Tensor Value Float, Tensor Value Float)
    +                                -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
    +                                --
    +                                -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
    +                                -- the ID of a sampled candidate.
    +                                --
    +                                -- * __true_expected_count__: A batch_size * num_true matrix, representing
    +                                -- the number of times each candidate is expected to occur in a batch
    +                                -- of sampled candidates. If unique=true, then this is a probability.
    +                                --
    +                                -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
    +                                -- candidate representing the number of times the candidate is expected
    +                                -- to occur in a batch of sampled candidates.  If unique=true, then this is a
    +                                -- probability.
    +fixedUnigramCandidateSampler num_sampled num_true range_max unique
    +                             true_classes | eqLengthGuard [] =
    +    buildOp (opDef "FixedUnigramCandidateSampler"
    +             & opAttr "num_sampled" .~ num_sampled
    +             & opAttr "num_true" .~ num_true
    +             & opAttr "range_max" .~ range_max
    +             & opAttr "unique" .~ unique)
    +        true_classes
    +{-
    +attr {
    +  description: "Number of true labels per context."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_true"
    +  type: "int"
    +}
    +attr {
    +  description: "Number of candidates to randomly sample per batch."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_sampled"
    +  type: "int"
    +}
    +attr {
    +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
    +  name: "unique"
    +  type: "bool"
    +}
    +attr {
    +  description: "The sampler will sample integers from the interval [0, range_max)."
    +  has_minimum: true
    +  minimum: 1
    +  name: "range_max"
    +  type: "int"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "Each valid line in this file (which should have a CSV-like format)\ncorresponds to a valid word ID. IDs are in sequential order, starting from\nnum_reserved_ids. The last entry in each line is expected to be a value\ncorresponding to the count or relative probability. Exactly one of vocab_file\nand unigrams needs to be passed to this op."
    +  name: "vocab_file"
    +  type: "string"
    +}
    +attr {
    +  default_value { f: 1.0 }
    +  description: "The distortion is used to skew the unigram probability distribution.\nEach weight is first raised to the distortion\'s power before adding to the\ninternal unigram distribution. As a result, distortion = 1.0 gives regular\nunigram sampling (as defined by the vocab file), and distortion = 0.0 gives\na uniform distribution."
    +  name: "distortion"
    +  type: "float"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "Optionally some reserved IDs can be added in the range [0,\n..., num_reserved_ids) by the users. One use case is that a special unknown\nword token is used as ID 0. These IDs will have a sampling probability of 0."
    +  name: "num_reserved_ids"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 1 }
    +  description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'shard\') indicates the number of partitions that are being\nused in the overall computation."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_shards"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'num_shards\') indicates the particular partition number of a\nsampler op, when partitioning is being used."
    +  has_minimum: true
    +  name: "shard"
    +  type: "int"
    +}
    +attr {
    +  default_value { list { } }
    +  description: "A list of unigram counts or probabilities, one per ID in sequential\norder. Exactly one of vocab_file and unigrams should be passed to this op."
    +  name: "unigrams"
    +  type: "list(float)"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "An second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +input_arg {
    +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
    +  name: "true_classes"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
    +  name: "sampled_candidates"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
    +  name: "true_expected_count"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
    +  name: "sampled_expected_count"
    +  type: DT_FLOAT
    +}
    +-}
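    +
    +-- A call sketch: sample 5 unique candidates per batch from [0, 10000) with
    +-- one true label per context. 'trueClasses' is a placeholder that would come
    +-- from the surrounding graph. Note (from the OpDef above) that exactly one
    +-- of the 'vocab_file' and 'unigrams' attributes must be set at run time;
    +-- this generated wrapper does not expose them, so the sketch illustrates
    +-- the argument order only.
    +--
    +-- > (sampled, trueExpected, sampledExpected) =
    +-- >     fixedUnigramCandidateSampler 5 1 10000 True trueClasses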
    +
    +-- | Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
    +--
    +-- The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
    +-- `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
    +-- input channel is processed independently of the others with its own structuring
    +-- function. The `output` tensor has shape
    +-- `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
    +-- tensor depend on the `padding` algorithm. We currently only support the default
    +-- "NHWC" `data_format`.
    +-- 
    +-- In detail, the grayscale morphological 2-D dilation is the max-sum correlation
    +-- (for consistency with `conv2d`, we use unmirrored filters):
    +-- 
    +--     output[b, y, x, c] =
    +--        max_{dy, dx} input[b,
    +--                           strides[1] * y + rates[1] * dy,
    +--                           strides[2] * x + rates[2] * dx,
    +--                           c] +
    +--                     filter[dy, dx, c]
    +-- 
    +-- Max-pooling is a special case when the filter has size equal to the pooling
    +-- kernel size and contains all zeros.
    +-- 
    +-- Note on duality: The dilation of `input` by the `filter` is equal to the
    +-- negation of the erosion of `-input` by the reflected `filter`.
    +dilation2D :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                      Data.Int.Int32,
    +                                                      Data.Int.Int64,
    +                                                      Data.Int.Int8,
    +                                                      Data.Word.Word16,
    +                                                      Data.Word.Word8, Double,
    +                                                      Float] t) =>
    +              Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
    +              -> Tensor v2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
    +              -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, out_height, out_width, depth]`.
    +dilation2D input filter | eqLengthGuard [] =
    +    buildOp (opDef "Dilation2D"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of the input\ntensor. Must be: `[1, stride_height, stride_width, 1]`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "The input stride for atrous morphological dilation. Must be:\n`[1, rate_height, rate_width, 1]`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "rates"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, in_height, in_width, depth]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "3-D with shape `[filter_height, filter_width, depth]`."
    +  name: "filter"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "4-D with shape `[batch, out_height, out_width, depth]`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
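    +
    +-- To make the max-sum correlation concrete, here is a pure 1-D analogue in
    +-- plain Haskell (not a TensorFlow op), with stride 1, rate 1, and VALID
    +-- padding:
    +--
    +-- > dilate1D :: [Double] -> [Double] -> [Double]
    +-- > dilate1D input filt =
    +-- >     [ maximum [ input !! (y + dy) + f | (dy, f) <- zip [0 ..] filt ]
    +-- >     | y <- [0 .. length input - length filt] ]
    +-- >
    +-- > -- dilate1D [1, 2, 3, 4] [0, 10] == [12, 13, 14]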
    +
    +-- | Compute the polygamma function \\(\psi^{(n)}(x)\\).
    +--
    +-- The polygamma function is defined as:
    +-- 
    +-- ```
    +-- \psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)
    +-- ```
    +-- where \\(\psi(x)\\) is the digamma function.
    +polygamma :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
    +             Tensor v1 t -- ^ __a__
    +             -> Tensor v2 t -- ^ __x__
    +             -> Tensor Value t -- ^ __z__
    +polygamma a x | eqLengthGuard [] =
    +    buildOp (opDef "Polygamma"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        a x
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "a" type_attr: "T" }
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Return the same ref tensor as the input ref tensor.
    +
    +refIdentity :: forall v1 t . (TensorType t) => Tensor v1 t -- ^ __input__
    +               -> Tensor Value t -- ^ __output__
    +refIdentity input | eqLengthGuard [] =
    +    buildOp (opDef "RefIdentity"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +input_arg { is_ref: true name: "input" type_attr: "T" }
    +output_arg { is_ref: true name: "output" type_attr: "T" }
    +-}
    +
    +-- | PNG-encode an image.
    +--
    +-- `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
    +-- where `channels` is:
    +-- 
    +-- *   1: for grayscale.
    +-- *   2: for grayscale + alpha.
    +-- *   3: for RGB.
    +-- *   4: for RGBA.
    +-- 
    +-- The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
    +-- default or a value from 0 to 9.  9 is the highest compression level, generating
    +-- the smallest output, but is slower.
    +encodePng :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16,
    +                                                  Data.Word.Word8] t) =>
    +             Tensor v1 t -- ^ __image__: 3-D with shape `[height, width, channels]`.
    +             -> Tensor Value Data.ByteString.ByteString -- ^ __contents__: 0-D. PNG-encoded image.
    +encodePng image | eqLengthGuard [] =
    +    buildOp (opDef "EncodePng"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        image
    +{-
    +attr {
    +  default_value { i: -1 }
    +  description: "Compression level."
    +  name: "compression"
    +  type: "int"
    +}
    +attr {
    +  allowed_values { list { type: DT_UINT8 type: DT_UINT16 } }
    +  default_value { type: DT_UINT8 }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "3-D with shape `[height, width, channels]`."
    +  name: "image"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "0-D. PNG-encoded image."
    +  name: "contents"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Updates the table to associate keys with values.
    +--
    +-- The tensor `keys` must be of the same type as the keys of the table.
    +-- The tensor `values` must be of the type of the table values.
    +lookupTableInsert :: forall v1 v2 v3 tin tout . (TensorType tin,
    +                                                 TensorType tout) =>
    +                     Tensor v1 Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
    +                     -> Tensor v2 tin -- ^ __keys__: Any shape.  Keys to look up.
    +                     -> Tensor v3 tout -- ^ __values__: Values to associate with keys.
    +                     -> ControlNode
    +lookupTableInsert table_handle keys values | eqLengthGuard [] =
    +    buildOp (opDef "LookupTableInsert"
    +             & opAttr "Tin" .~ tensorType (undefined :: tin)
    +             & opAttr "Tout" .~ tensorType (undefined :: tout))
    +        table_handle keys values
    +{-
    +attr { name: "Tin" type: "type" }
    +attr { name: "Tout" type: "type" }
    +input_arg {
    +  description: "Handle to the table."
    +  is_ref: true
    +  name: "table_handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Any shape.  Keys to look up."
    +  name: "keys"
    +  type_attr: "Tin"
    +}
    +input_arg {
    +  description: "Values to associate with keys."
    +  name: "values"
    +  type_attr: "Tout"
    +}
    +-}
    +
    +-- | 
    +
    +batchIFFT2D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
    +               -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
    +batchIFFT2D input | eqLengthGuard [] =
    +    buildOp (opDef "BatchIFFT2D")
    +        input
    +{-
    +input_arg { name: "input" type: DT_COMPLEX64 }
    +output_arg { name: "output" type: DT_COMPLEX64 }
    +-}
    +
    +-- | Finds unique elements in a 1-D tensor.
    +--
    +-- This operation returns a tensor `y` containing all of the unique elements of `x`
    +-- sorted in the same order that they occur in `x`. This operation also returns a
    +-- tensor `idx` the same size as `x` that contains the index of each value of `x`
    +-- in the unique output `y`. Finally, it returns a third tensor `count` that
    +-- contains the count of each element of `y` in `x`. In other words:
    +-- 
    +-- `y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
    +-- y, idx, count = unique_with_counts(x)
    +-- y ==> [1, 2, 4, 7, 8]
    +-- idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
    +-- count ==> [2, 1, 3, 1, 2]
    +-- ```
    +uniqueWithCounts :: forall v1 t out_idx . (TensorType t, TensorType out_idx,
    +                                           OneOf '[Data.Int.Int32,
    +                                                   Data.Int.Int64] out_idx) =>
    +                    Tensor v1 t -- ^ __x__: 1-D.
    +                    -> (Tensor Value t, Tensor Value out_idx,
    +                        Tensor Value out_idx)
    +                    -- ^ (__y__, __idx__, __count__)
    +                    --
    +                    -- * __y__: 1-D.
    +                    --
    +                    -- * __idx__: 1-D.
    +                    --
    +                    -- * __count__: 1-D.
    +uniqueWithCounts x | eqLengthGuard [] =
    +    buildOp (opDef "UniqueWithCounts"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "out_idx" .~ tensorType (undefined :: out_idx))
    +        x
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "out_idx"
    +  type: "type"
    +}
    +input_arg { description: "1-D." name: "x" type_attr: "T" }
    +output_arg { description: "1-D." name: "y" type_attr: "T" }
    +output_arg { description: "1-D." name: "idx" type_attr: "out_idx" }
    +output_arg {
    +  description: "1-D." name: "count" type_attr: "out_idx"
    +}
    +-}
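    +
    +-- A sketch mirroring the prettyprint example; 'constant' is assumed from the
    +-- companion tensorflow-ops package. All three outputs can then be fetched
    +-- together in a single Session run.
    +--
    +-- > x :: Tensor Value Float
    +-- > x = constant (Shape [9]) [1, 1, 2, 4, 4, 4, 7, 8, 8]
    +-- >
    +-- > (y, idx, count) = uniqueWithCounts x
    +-- >     :: (Tensor Value Float, Tensor Value Data.Int.Int32,
    +-- >         Tensor Value Data.Int.Int32)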
    +
    +-- | Gather values or slices from `params` according to `indices`.
    +--
    +-- `params` is a Tensor of rank `M` and `indices` is an integer Tensor of
    +-- rank `N+1`, containing indices into `params`.
    +-- 
    +-- `indices` must have shape `[d_0, ..., d_{N-1}, R]` where `0 < R <= M`.
    +-- 
    +-- The innermost dimension of `indices` (with length `R`) corresponds to
    +-- indices into elements (if `R = M`) or slices (if `R < M`) along the first
    +-- `R` dimensions of `params`.
    +-- 
    +-- Produces an output tensor with shape
    +-- 
    +--     [d_0, ..., d_{N-1}, params.shape[R], ..., params.shape[M-1]].
    +-- 
    +-- Some examples below.
    +-- 
    +-- Simple indexing into a matrix:
    +-- 
    +--     indices = [[0, 0], [1, 1]]
    +--     params = [['a', 'b'], ['c', 'd']]
    +--     output = ['a', 'd']
    +-- 
    +-- Slice indexing into a matrix:
    +-- 
    +--     indices = [[1], [0]]
    +--     params = [['a', 'b'], ['c', 'd']]
    +--     output = [['c', 'd'], ['a', 'b']]
    +-- 
    +-- Indexing into a 3-tensor:
    +-- 
    +--     indices = [[1]]
    +--     params = [[['a0', 'b0'], ['c0', 'd0']],
    +--               [['a1', 'b1'], ['c1', 'd1']]]
    +--     output = [[['a1', 'b1'], ['c1', 'd1']]]
    +-- 
    +-- 
    +--     indices = [[0, 1], [1, 0]]
    +--     params = [[['a0', 'b0'], ['c0', 'd0']],
    +--               [['a1', 'b1'], ['c1', 'd1']]]
    +--     output = [['c0', 'd0'], ['a1', 'b1']]
    +-- 
    +-- 
    +--     indices = [[0, 0, 1], [1, 0, 1]]
    +--     params = [[['a0', 'b0'], ['c0', 'd0']],
    +--               [['a1', 'b1'], ['c1', 'd1']]]
    +--     output = ['b0', 'b1']
    +-- 
    +-- Batched indexing into a matrix:
    +-- 
    +--     indices = [[[0, 0]], [[0, 1]]]
    +--     params = [['a', 'b'], ['c', 'd']]
    +--     output = [['a'], ['b']]
    +-- 
    +-- Batched slice indexing into a matrix:
    +-- 
    +--     indices = [[[1]], [[0]]]
    +--     params = [['a', 'b'], ['c', 'd']]
    +--     output = [[['c', 'd']], [['a', 'b']]]
    +-- 
    +-- Batched indexing into a 3-tensor:
    +-- 
    +--     indices = [[[1]], [[0]]]
    +--     params = [[['a0', 'b0'], ['c0', 'd0']],
    +--               [['a1', 'b1'], ['c1', 'd1']]]
    +--     output = [[[['a1', 'b1'], ['c1', 'd1']]],
    +--               [[['a0', 'b0'], ['c0', 'd0']]]]
    +-- 
    +-- 
    +--     indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
    +--     params = [[['a0', 'b0'], ['c0', 'd0']],
    +--               [['a1', 'b1'], ['c1', 'd1']]]
    +--     output = [[['c0', 'd0'], ['a1', 'b1']],
    +--               [['a0', 'b0'], ['c1', 'd1']]]
    +-- 
    +-- 
    +--     indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
    +--     params = [[['a0', 'b0'], ['c0', 'd0']],
    +--               [['a1', 'b1'], ['c1', 'd1']]]
    +--     output = [['b0', 'b1'], ['d0', 'c1']]
    +gatherNd :: forall v1 v2 tindices tparams . (TensorType tindices,
    +                                             OneOf '[Data.Int.Int32,
    +                                                     Data.Int.Int64] tindices,
    +                                             TensorType tparams) =>
    +            Tensor v1 tparams -- ^ __params__: `M-D`.  The tensor from which to gather values.
    +            -> Tensor v2 tindices -- ^ __indices__: `(N+1)-D`.  Index tensor having shape `[d_0, ..., d_{N-1}, R]`.
    +            -> Tensor Value tparams -- ^ __output__: `(N+M-R)-D`.  Values from `params` gathered from indices given by
    +            -- `indices`.
    +gatherNd params indices | eqLengthGuard [] =
    +    buildOp (opDef "GatherNd"
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices)
    +             & opAttr "Tparams" .~ tensorType (undefined :: tparams))
    +        params indices
    +{-
    +attr { name: "Tparams" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +input_arg {
    +  description: "`M-D`.  The tensor from which to gather values."
    +  name: "params"
    +  type_attr: "Tparams"
    +}
    +input_arg {
    +  description: "`(N+1)-D`.  Index tensor having shape `[d_0, ..., d_N, R]`."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "`(N+M-R)-D`.  Values from `params` gathered from indices given by\n`indices`."
    +  name: "output"
    +  type_attr: "Tparams"
    +}
    +-}
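    +
    +-- The "simple indexing into a matrix" case above, as a pure-Haskell sanity
    +-- check on plain lists (not a TensorFlow op): each row of 'indices' selects
    +-- one element of 'params'.
    +--
    +-- > gatherNd2 :: [[a]] -> [[Int]] -> [a]
    +-- > gatherNd2 params indices = [ params !! i !! j | [i, j] <- indices ]
    +-- >
    +-- > -- gatherNd2 [["a", "b"], ["c", "d"]] [[0, 0], [1, 1]] == ["a", "d"]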
    +
    +-- | Read an element from the TensorArray into output `value`.
    +
    +tensorArrayRead :: forall v1 v2 v3 dtype . (TensorType dtype) =>
    +                   Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
    +                   -> Tensor v2 Data.Int.Int32 -- ^ __index__
    +                   -> Tensor v3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
    +                   -> Tensor Value dtype -- ^ __value__: The tensor that is read from the TensorArray.
    +tensorArrayRead handle index flow_in | eqLengthGuard [] =
    +    buildOp (opDef "TensorArrayRead"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        handle index flow_in
    +{-
    +attr {
    +  description: "The type of the elem that is returned."
    +  name: "dtype"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The handle to a TensorArray."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg { name: "index" type: DT_INT32 }
    +input_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_in"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "The tensor that is read from the TensorArray."
    +  name: "value"
    +  type_attr: "dtype"
    +}
    +-}
    +
    +-- | Returns up to `num_records` (key, value) pairs produced by a Reader.
    +--
    +-- Will dequeue from the input queue if necessary (e.g. when the
    +-- Reader needs to start reading from a new file since it has finished
    +-- with the previous file).
    +-- It may return fewer than `num_records` pairs, even before the last batch.
    +readerReadUpTo :: Tensor v1 Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a `Reader`.
    +                  -> Tensor v2 Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a `Queue`, with string work items.
    +                  -> Tensor v3 Data.Int.Int64 -- ^ __num_records__: number of records to read from `Reader`.
    +                  -> (Tensor Value Data.ByteString.ByteString,
    +                      Tensor Value Data.ByteString.ByteString)
    +                  -- ^ (__keys__, __values__)
    +                  --
    +                  -- * __keys__: A 1-D tensor.
    +                  --
    +                  -- * __values__: A 1-D tensor.
    +readerReadUpTo reader_handle queue_handle num_records | eqLengthGuard [] =
    +    buildOp (opDef "ReaderReadUpTo")
    +        reader_handle queue_handle num_records
    +{-
    +input_arg {
    +  description: "Handle to a `Reader`."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Handle to a `Queue`, with string work items."
    +  is_ref: true
    +  name: "queue_handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "number of records to read from `Reader`."
    +  name: "num_records"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A 1-D tensor." name: "keys" type: DT_STRING
    +}
    +output_arg {
    +  description: "A 1-D tensor." name: "values" type: DT_STRING
    +}
    +-}
    +
    +-- | Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
    +--
    +-- The regularized incomplete beta integral is defined as:
    +-- 
    +-- ```
    +-- I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}
    +-- ```
    +-- where
    +-- 
    +-- ```
    +-- B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt
    +-- ```
    +-- 
    +-- is the incomplete beta function and \\(B(a, b)\\) is the *complete*
    +-- beta function.
    +betainc :: forall v1 v2 v3 t . (TensorType t, OneOf '[Double, Float] t) =>
    +           Tensor v1 t -- ^ __a__
    +           -> Tensor v2 t -- ^ __b__
    +           -> Tensor v3 t -- ^ __x__
    +           -> Tensor Value t -- ^ __z__
    +betainc a b x | eqLengthGuard [] =
    +    buildOp (opDef "Betainc"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        a b x
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "a" type_attr: "T" }
    +input_arg { name: "b" type_attr: "T" }
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | 
    +
    +batchMatrixBandPart :: forall v1 v2 v3 t . (TensorType t) =>
    +                       Tensor v1 t -- ^ __input__
    +                       -> Tensor v2 Data.Int.Int64 -- ^ __num_lower__
    +                       -> Tensor v3 Data.Int.Int64 -- ^ __num_upper__
    +                       -> Tensor Value t -- ^ __band__
    +batchMatrixBandPart input num_lower num_upper | eqLengthGuard [] =
    +    buildOp (opDef "BatchMatrixBandPart"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input num_lower num_upper
    +{-
    +attr { name: "T" type: "type" }
    +input_arg { name: "input" type_attr: "T" }
    +input_arg { name: "num_lower" type: DT_INT64 }
    +input_arg { name: "num_upper" type: DT_INT64 }
    +output_arg { name: "band" type_attr: "T" }
    +-}
    +
    +-- | Computes the gradients of depthwise convolution with respect to the input.
    +
    +depthwiseConv2dNativeBackpropInput :: forall v1 v2 v3 t . (TensorType t,
    +                                                           OneOf '[Double,
    +                                                                   Float] t) =>
    +                                      Tensor v1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`,
    +                                                               -- where `input` is a 4-D `[batch, height, width, channels]` tensor.
    +                                      -> Tensor v2 t -- ^ __filter__: 4-D with shape
    +                                                     -- `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
    +                                      -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
    +                                                     -- Gradients w.r.t. the output of the convolution.
    +                                      -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
    +                                      -- w.r.t. the input of the convolution.
    +depthwiseConv2dNativeBackpropInput input_sizes filter
    +                                   out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "DepthwiseConv2dNativeBackpropInput"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input_sizes filter out_backprop
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of the input\nof the convolution."
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor."
    +  name: "input_sizes"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, depthwise_multiplier]`."
    +  name: "filter"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient\nw.r.t. the input of the convolution."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Forwards the `index`th element of `inputs` to `output`.
    +
    +refSelect :: forall v1 v2 t . (TensorType t) =>
    +             Tensor v1 Data.Int.Int32 -- ^ __index__: A scalar that determines the input that gets selected.
    +             -> [Tensor v2 t] -- ^ __inputs__: A list of ref tensors, one of which will be forwarded to `output`.
    +             -> Tensor Value t -- ^ __output__: The forwarded tensor.
    +refSelect index inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
    +    buildOp (opDef "RefSelect"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "N" .~ (fromIntegral (length inputs) :: Int64))
    +        index inputs
    +{-
    +attr { name: "T" type: "type" }
    +attr { has_minimum: true minimum: 1 name: "N" type: "int" }
    +input_arg {
    +  description: "A scalar that determines the input that gets selected."
    +  name: "index"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "A list of ref tensors, one of which will be forwarded to `output`."
    +  is_ref: true
    +  name: "inputs"
    +  number_attr: "N"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The forwarded tensor."
    +  is_ref: true
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
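    +
    +-- A call sketch for the list-input pattern: the length of 'inputs' populates
    +-- the "N" attribute, and 'eqLengthGuard' checks that the declared lengths
    +-- agree. 'idx' and the ref tensors are placeholders from the surrounding
    +-- graph.
    +--
    +-- > -- N = 3; 'idx' selects one of the three ref tensors at run time.
    +-- > picked = refSelect idx [t0, t1, t2]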
    +
    +-- | Exits the current frame to its parent frame.
    +--
    +-- Exit makes its input `data` available to the parent frame.
    +exit :: forall v1 t . (TensorType t) =>
    +        Tensor v1 t -- ^ __data__: The tensor to be made available to the parent frame.
    +        -> Tensor Value t -- ^ __output__: The same tensor as `data`.
    +exit data' | eqLengthGuard [] =
    +    buildOp (opDef "Exit"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        data'
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The tensor to be made available to the parent frame."
    +  name: "data"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The same tensor as `data`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Looks up keys in a table, outputs the corresponding values.
    +--
    +-- The tensor `keys` must be of the same type as the keys of the table.
    +-- The output `values` is of the type of the table values.
    +-- 
    +-- The scalar `default_value` is the value output for keys not present in the
    +-- table. It must also be of the same type as the table values.
    +lookupTableFind :: forall v1 v2 v3 tin tout . (TensorType tin,
    +                                               TensorType tout) =>
    +                   Tensor v1 Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
    +                   -> Tensor v2 tin -- ^ __keys__: Any shape.  Keys to look up.
    +                   -> Tensor v3 tout -- ^ __default_value__
    +                   -> Tensor Value tout -- ^ __values__: Same shape as `keys`.  Values found in the table, or `default_values`
    +                   -- for missing keys.
    +lookupTableFind table_handle keys default_value | eqLengthGuard [] =
    +    buildOp (opDef "LookupTableFind"
    +             & opAttr "Tin" .~ tensorType (undefined :: tin)
    +             & opAttr "Tout" .~ tensorType (undefined :: tout))
    +        table_handle keys default_value
    +{-
    +attr { name: "Tin" type: "type" }
    +attr { name: "Tout" type: "type" }
    +input_arg {
    +  description: "Handle to the table."
    +  is_ref: true
    +  name: "table_handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Any shape.  Keys to look up."
    +  name: "keys"
    +  type_attr: "Tin"
    +}
    +input_arg { name: "default_value" type_attr: "Tout" }
    +output_arg {
    +  description: "Same shape as `keys`.  Values found in the table, or `default_values`\nfor missing keys."
    +  name: "values"
    +  type_attr: "Tout"
    +}
    +-}
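    +
    +-- A sketch combining 'lookupTableInsert' (above) with this op. The table
    +-- handle would come from a table-creation op not shown here; 'constant' is
    +-- assumed from the companion tensorflow-ops package, and the string
    +-- literals assume OverloadedStrings.
    +--
    +-- > populate table = lookupTableInsert table keys values
    +-- >   where keys   = constant (Shape [2]) ["hello", "world" :: Data.ByteString.ByteString]
    +-- >         values = constant (Shape [2]) [0, 1 :: Data.Int.Int64]
    +-- >
    +-- > query table = lookupTableFind table keys' defaultVal
    +-- >   where keys'      = constant (Shape [1]) ["hello" :: Data.ByteString.ByteString]
    +-- >         defaultVal = constant (Shape []) [-1 :: Data.Int.Int64]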
    +
    +-- | Removes dimensions of size 1 from the shape of a tensor.
    +--
    +-- Given a tensor `input`, this operation returns a tensor of the same type with
    +-- all dimensions of size 1 removed. If you don't want to remove all size 1
    +-- dimensions, you can remove specific size 1 dimensions by specifying
    +-- `squeeze_dims`.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
    +-- shape(squeeze(t)) ==> [2, 3]
    +-- ```
    +-- 
    +-- Or, to remove specific size 1 dimensions:
    +-- 
    +-- ```prettyprint
    +-- # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
    +-- shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
    +-- ```
    +squeeze :: forall v1 t . (TensorType t) =>
    +           Tensor v1 t -- ^ __input__: The `input` to squeeze.
    +           -> Tensor Value t -- ^ __output__: Contains the same data as `input`, but has one or more dimensions of
    +           -- size 1 removed.
    +squeeze input | eqLengthGuard [] =
    +    buildOp (opDef "Squeeze"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  default_value { list { } }
    +  description: "If specified, only squeezes the dimensions listed. The dimension\nindex starts at 0. It is an error to squeeze a dimension that is not 1."
    +  has_minimum: true
    +  name: "squeeze_dims"
    +  type: "list(int)"
    +}
    +input_arg {
    +  description: "The `input` to squeeze." name: "input" type_attr: "T"
    +}
    +output_arg {
    +  description: "Contains the same data as `input`, but has one or more dimensions of\nsize 1 removed."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
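    +
    +-- A sketch of the first example above. With the defaulted 'squeeze_dims'
    +-- attribute, this wrapper removes every size-1 dimension; 'constant' is
    +-- assumed from the companion tensorflow-ops package.
    +--
    +-- > -- Shape [1, 2, 1, 3, 1, 1] squeezes to shape [2, 3].
    +-- > squeezed :: Tensor Value Float
    +-- > squeezed = squeeze (constant (Shape [1, 2, 1, 3, 1, 1]) [1 .. 6])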
    +
    +-- | Computes the mean of elements across dimensions of a tensor.
    +--
    +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
    +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
    +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
    +-- retained with length 1.
    +mean :: forall v1 v2 t tidx . (TensorType t,
    +                               OneOf '[(Data.Complex.Complex Double),
    +                                       (Data.Complex.Complex Float),
    +                                       Data.Int.Int16, Data.Int.Int32,
    +                                       Data.Int.Int64, Data.Int.Int8,
    +                                       Data.Word.Word16, Data.Word.Word8,
    +                                       Double, Float] t, TensorType tidx,
    +                               OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
    +        Tensor v1 t -- ^ __input__: The tensor to reduce.
    +        -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
    +        -> Tensor Value t -- ^ __output__: The reduced tensor.
    +mean input reduction_indices | eqLengthGuard [] =
    +    buildOp (opDef "Mean"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        input reduction_indices
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, retain reduced dimensions with length 1."
    +  name: "keep_dims"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The tensor to reduce." name: "input" type_attr: "T"
    +}
    +input_arg {
    +  description: "The dimensions to reduce."
    +  name: "reduction_indices"
    +  type_attr: "Tidx"
    +}
    +output_arg {
    +  description: "The reduced tensor." name: "output" type_attr: "T"
    +}
    +-}
    +
    +-- | SpaceToBatch for N-D tensors of type T.
    +--
    +-- This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
    +-- grid of blocks of shape `block_shape`, and interleaves these blocks with the
    +-- "batch" dimension (0) such that in the output, the spatial dimensions
    +-- `[1, ..., M]` correspond to the position within the grid, and the batch
    +-- dimension combines both the position within a spatial block and the original
    +-- batch position.  Prior to division into blocks, the spatial dimensions of the
    +-- input are optionally zero padded according to `paddings`.  See below for a
    +-- precise description.
    +spaceToBatchND :: forall v1 v2 v3 t tblock_shape tpaddings . (TensorType t,
    +                                                              TensorType tblock_shape,
    +                                                              OneOf '[Data.Int.Int32,
    +                                                                      Data.Int.Int64] tblock_shape,
    +                                                              TensorType tpaddings,
    +                                                              OneOf '[Data.Int.Int32,
    +                                                                      Data.Int.Int64] tpaddings) =>
    +                  Tensor v1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
    +                              -- where spatial_shape has `M` dimensions.
    +                  -> Tensor v2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1.
    +                  -> Tensor v3 tpaddings -- ^ __paddings__: 2-D with shape `[M, 2]`, all values must be >= 0.
    +                                         --   `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
    +                                         --   `i + 1`, which corresponds to spatial dimension `i`.  It is required that
    +                                         --   `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
    +                                         -- 
    +                                         -- This operation is equivalent to the following steps:
    +                                         -- 
    +                                         -- 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
    +                                         --    input according to `paddings` to produce `padded` of shape `padded_shape`.
    +                                         -- 
    +                                         -- 2. Reshape `padded` to `reshaped_padded` of shape:
    +                                         --      [batch] +
    +                                         --      [padded_shape[1] / block_shape[0],
    +                                         --        block_shape[0],
    +                                         --       ...,
    +                                         --       padded_shape[M] / block_shape[M-1],
    +                                         --       block_shape[M-1]] +
    +                                         --      remaining_shape
    +                                         -- 
    +                                         -- 3. Permute dimensions of `reshaped_padded` to produce
    +                                         --    `permuted_reshaped_padded` of shape:
    +                                         --      block_shape +
    +                                         --      [batch] +
    +                                         --      [padded_shape[1] / block_shape[0],
    +                                         --       ...,
    +                                         --       padded_shape[M] / block_shape[M-1]] +
    +                                         --      remaining_shape
    +                                         -- 
    +                                         -- 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
    +                                         --    dimension, producing an output tensor of shape:
    +                                         --      [batch * prod(block_shape)] +
    +                                         --      [padded_shape[1] / block_shape[0],
    +                                         --       ...,
    +                                         --       padded_shape[M] / block_shape[M-1]] +
    +                                         --      remaining_shape
    +                                         -- 
    +                                         -- Some examples:
    +                                         -- 
    +                                         -- (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
    +                                         --     `paddings = [[0, 0], [0, 0]]`:
    +                                         -- 
    +                                         -- ```prettyprint
    +                                         -- x = [[[[1], [2]], [[3], [4]]]]
    +                                         -- ```
    +                                         -- 
    +                                         -- The output tensor has shape `[4, 1, 1, 1]` and value:
    +                                         -- 
    +                                         -- ```prettyprint
    +                                         -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
    +                                         -- ```
    +                                         -- 
    +                                         -- (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
    +                                         --     `paddings = [[0, 0], [0, 0]]`:
    +                                         -- 
    +                                         -- ```prettyprint
    +                                         -- x = [[[[1, 2, 3], [4, 5, 6]],
    +                                         --       [[7, 8, 9], [10, 11, 12]]]]
    +                                         -- ```
    +                                         -- 
    +                                         -- The output tensor has shape `[4, 1, 1, 3]` and value:
    +                                         -- 
    +                                         -- ```prettyprint
    +                                         -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
    +                                         -- ```
    +                                         -- 
    +                                         -- (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
    +                                         --     `paddings = [[0, 0], [0, 0]]`:
    +                                         -- 
    +                                         -- ```prettyprint
    +                                         -- x = [[[[1],   [2],  [3],  [4]],
    +                                         --       [[5],   [6],  [7],  [8]],
    +                                         --       [[9],  [10], [11],  [12]],
    +                                         --       [[13], [14], [15],  [16]]]]
    +                                         -- ```
    +                                         -- 
    +                                         -- The output tensor has shape `[4, 2, 2, 1]` and value:
    +                                         -- 
    +                                         -- ```prettyprint
    +                                         -- x = [[[[1], [3]], [[9], [11]]],
    +                                         --      [[[2], [4]], [[10], [12]]],
    +                                         --      [[[5], [7]], [[13], [15]]],
    +                                         --      [[[6], [8]], [[14], [16]]]]
    +                                         -- ```
    +                                         -- 
    +                                         -- (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
    +                                         --     paddings = `[[0, 0], [2, 0]]`:
    +                                         -- 
    +                                         -- ```prettyprint
    +                                         -- x = [[[[1],   [2],  [3],  [4]],
    +                                         --       [[5],   [6],  [7],  [8]]],
    +                                         --      [[[9],  [10], [11],  [12]],
    +                                         --       [[13], [14], [15],  [16]]]]
    +                                         -- ```
    +                                         -- 
    +                                         -- The output tensor has shape `[8, 1, 3, 1]` and value:
    +                                         -- 
    +                                         -- ```prettyprint
    +                                         -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
    +                                         --      [[[0], [2], [4]]], [[[0], [10], [12]]],
    +                                         --      [[[0], [5], [7]]], [[[0], [13], [15]]],
    +                                         --      [[[0], [6], [8]]], [[[0], [14], [16]]]]
    +                                         -- ```
    +                                         -- 
    +                                         -- Among others, this operation is useful for reducing atrous convolution into
    +                                         -- regular convolution.
    +                  -> Tensor Value t -- ^ __output__
    +spaceToBatchND input block_shape paddings | eqLengthGuard [] =
    +    buildOp (opDef "SpaceToBatchND"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tblock_shape" .~ tensorType (undefined :: tblock_shape)
    +             & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings))
    +        input block_shape paddings
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tblock_shape"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tpaddings"
    +  type: "type"
    +}
    +input_arg {
    +  description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has `M` dimensions."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D with shape `[M]`, all values must be >= 1."
    +  name: "block_shape"
    +  type_attr: "Tblock_shape"
    +}
    +input_arg {
    +  description: "2-D with shape `[M, 2]`, all values must be >= 0.\n  `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n  `i + 1`, which corresponds to spatial dimension `i`.  It is required that\n  `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n\nThis operation is equivalent to the following steps:\n\n1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\n   input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n2. Reshape `padded` to `reshaped_padded` of shape:\n     [batch] +\n     [padded_shape[1] / block_shape[0],\n       block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1],\n      block_shape[M-1]] +\n     remaining_shape\n\n3. Permute dimensions of `reshaped_padded` to produce\n   `permuted_reshaped_padded` of shape:\n     block_shape +\n     [batch] +\n     [padded_shape[1] / block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1]] +\n     remaining_shape\n\n4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n   dimension, producing an output tensor of shape:\n     [batch * prod(block_shape)] +\n     [padded_shape[1] / block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1]] +\n     remaining_shape\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]],\n      [[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n    paddings = `[[0, 0], [2, 0]]`:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 3, 1]` and value:\n\n```prettyprint\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n     [[[0], [2], [4]]], [[[0], [10], [12]]],\n     [[[0], [5], [7]]], [[[0], [13], [15]]],\n     [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution."
    +  name: "paddings"
    +  type_attr: "Tpaddings"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
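+
+-- A minimal list-based sketch of the rearrangement above, for the special
+-- case of one spatial dimension (M = 1) on nested lists of shape
+-- [batch][width][depth].  `spaceToBatch1D` is a hypothetical helper for
+-- illustration only, not one of the generated bindings; it assumes blockW
+-- divides the padded width.
+spaceToBatch1D :: Int -> (Int, Int) -> a -> [[[a]]] -> [[[a]]]
+spaceToBatch1D blockW (padS, padE) zero input =
+    [ [ padded !! b !! w
+      | w <- [off, off + blockW .. length (padded !! b) - 1] ]
+    | off <- [0 .. blockW - 1]           -- output batch index is
+    , b   <- [0 .. length padded - 1] ]  --   off * batch_count + b
+  where
+    depth   = length (head (head input))
+    zeros n = replicate n (replicate depth zero)
+    padded  = [ zeros padS ++ row ++ zeros padE | row <- input ]
+
+-- Mirroring example (1) above in one dimension:
+-- spaceToBatch1D 2 (0, 0) 0 [[[1], [2], [3], [4]]]
+--   == [[[1], [3]], [[2], [4]]]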
    +
    +-- | SpaceToBatch for 4-D tensors of type T.
    +--
    +-- This is a legacy version of the more general SpaceToBatchND.
    +-- 
    +-- Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
    +-- More specifically, this op outputs a copy of the input tensor where values from
    +-- the `height` and `width` dimensions are moved to the `batch` dimension. After
    +-- the zero-padding, both `height` and `width` of the input must be divisible by the
    +-- block size.
    +spaceToBatch :: forall v1 v2 t tpaddings . (TensorType t, TensorType tpaddings,
    +                                            OneOf '[Data.Int.Int32,
    +                                                    Data.Int.Int64] tpaddings) =>
    +                Data.Int.Int64 -- ^ __block_size__
    +                -> Tensor v1 t -- ^ __input__: 4-D with shape `[batch, height, width, depth]`.
    +                -> Tensor v2 tpaddings -- ^ __paddings__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
    +                                       --   the padding of the input with zeros across the spatial dimensions as follows:
    +                                       -- 
    +                                       --       paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
    +                                       -- 
    +                                       --   The effective spatial dimensions of the zero-padded input tensor will be:
    +                                       -- 
    +                                       --       height_pad = pad_top + height + pad_bottom
    +                                       --       width_pad = pad_left + width + pad_right
    +                                       -- 
    +                                       -- The attr `block_size` must be greater than one. It indicates the block size.
    +                                       -- 
+                                       --   * Non-overlapping blocks of size `block_size x block_size` in the height and
    +                                       --     width dimensions are rearranged into the batch dimension at each location.
    +                                       --   * The batch of the output tensor is `batch * block_size * block_size`.
    +                                       --   * Both height_pad and width_pad must be divisible by block_size.
    +                                       -- 
    +                                       -- The shape of the output will be:
    +                                       -- 
    +                                       --     [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
    +                                       --      depth]
    +                                       -- 
    +                                       -- Some examples:
    +                                       -- 
    +                                       -- (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
    +                                       -- 
    +                                       -- ```prettyprint
    +                                       -- x = [[[[1], [2]], [[3], [4]]]]
    +                                       -- ```
    +                                       -- 
    +                                       -- The output tensor has shape `[4, 1, 1, 1]` and value:
    +                                       -- 
    +                                       -- ```prettyprint
    +                                       -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
    +                                       -- ```
    +                                       -- 
    +                                       -- (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
    +                                       -- 
    +                                       -- ```prettyprint
    +                                       -- x = [[[[1, 2, 3], [4, 5, 6]],
    +                                       --       [[7, 8, 9], [10, 11, 12]]]]
    +                                       -- ```
    +                                       -- 
    +                                       -- The output tensor has shape `[4, 1, 1, 3]` and value:
    +                                       -- 
    +                                       -- ```prettyprint
    +                                       -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
    +                                       -- ```
    +                                       -- 
    +                                       -- (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
    +                                       -- 
    +                                       -- ```prettyprint
    +                                       -- x = [[[[1],   [2],  [3],  [4]],
    +                                       --       [[5],   [6],  [7],  [8]],
    +                                       --       [[9],  [10], [11],  [12]],
    +                                       --       [[13], [14], [15],  [16]]]]
    +                                       -- ```
    +                                       -- 
    +                                       -- The output tensor has shape `[4, 2, 2, 1]` and value:
    +                                       -- 
    +                                       -- ```prettyprint
+                                       -- x = [[[[1], [3]], [[9], [11]]],
    +                                       --      [[[2], [4]], [[10], [12]]],
    +                                       --      [[[5], [7]], [[13], [15]]],
    +                                       --      [[[6], [8]], [[14], [16]]]]
    +                                       -- ```
    +                                       -- 
    +                                       -- (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
    +                                       -- 
    +                                       -- ```prettyprint
    +                                       -- x = [[[[1],   [2],  [3],  [4]],
    +                                       --       [[5],   [6],  [7],  [8]]],
    +                                       --      [[[9],  [10], [11],  [12]],
    +                                       --       [[13], [14], [15],  [16]]]]
    +                                       -- ```
    +                                       -- 
    +                                       -- The output tensor has shape `[8, 1, 2, 1]` and value:
    +                                       -- 
    +                                       -- ```prettyprint
    +                                       -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
    +                                       --      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
    +                                       -- ```
    +                                       -- 
    +                                       -- Among others, this operation is useful for reducing atrous convolution into
    +                                       -- regular convolution.
    +                -> Tensor Value t -- ^ __output__
    +spaceToBatch block_size input paddings | eqLengthGuard [] =
    +    buildOp (opDef "SpaceToBatch"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings)
    +             & opAttr "block_size" .~ block_size)
    +        input paddings
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tpaddings"
    +  type: "type"
    +}
    +attr {
    +  has_minimum: true minimum: 2 name: "block_size" type: "int"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, depth]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\n  the padding of the input with zeros across the spatial dimensions as follows:\n\n      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]\n\n  The effective spatial dimensions of the zero-padded input tensor will be:\n\n      height_pad = pad_top + height + pad_bottom\n      width_pad = pad_left + width + pad_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\n  * Non-overlapping blocks of size `block_size x block size` in the height and\n    width dimensions are rearranged into the batch dimension at each location.\n  * The batch of the output tensor is `batch * block_size * block_size`.\n  * Both height_pad and width_pad must be divisible by block_size.\n\nThe shape of the output will be:\n\n    [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n     depth]\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]],\n      [[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution."
    +  name: "paddings"
    +  type_attr: "Tpaddings"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
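+
+-- The same rearrangement for the legacy 4-D op with a square block, sketched
+-- on nested lists [batch][height][width][depth].  `spaceToBatchGrid` is a
+-- hypothetical helper for illustration only; it assumes the input is already
+-- zero-padded and that n divides both spatial extents.
+spaceToBatchGrid :: Int -> [[[[a]]]] -> [[[[a]]]]
+spaceToBatchGrid n imgs =
+    [ [ [ img !! r !! c
+        | c <- [offC, offC + n .. length (head img) - 1] ]
+      | r <- [offR, offR + n .. length img - 1] ]
+    | offR <- [0 .. n - 1]   -- row offset within the block
+    , offC <- [0 .. n - 1]   -- column offset within the block
+    , img  <- imgs ]
+
+-- Reproducing example (1) above:
+-- spaceToBatchGrid 2 [[[[1], [2]], [[3], [4]]]]
+--   == [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]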
    +
    +-- | Performs greedy decoding on the logits given in inputs.
    +--
    +-- A note about the attribute merge_repeated: if enabled, when
    +-- consecutive logits' maximum indices are the same, only the first of
    +-- these is emitted.  Labeling the blank '*', the sequence "A B B * B B"
    +-- becomes "A B" if merge_repeated = True and "A B B B B" if
    +-- merge_repeated = False.
    +-- 
    +-- Regardless of the value of merge_repeated, if the maximum index of a given
    +-- time and batch corresponds to the blank, index `(num_classes - 1)`, no new
    +-- element is emitted.
    +cTCGreedyDecoder :: Tensor v1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    +                    -> Tensor v2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch_size)`.
    +                    -> (Tensor Value Data.Int.Int64,
    +                        Tensor Value Data.Int.Int64,
    +                        Tensor Value Data.Int.Int64, Tensor Value Float)
    +                    -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__)
    +                    --
    +                    -- * __decoded_indices__: Indices matrix, size `(total_decoded_outputs x 2)`,
    +                    -- of a `SparseTensor<int64, 2>`.  The rows store: [batch, time].
    +                    --
    +                    -- * __decoded_values__: Values vector, size: `(total_decoded_outputs)`,
    +                    -- of a `SparseTensor<int64, 2>`.  The vector stores the decoded classes.
    +                    --
    +                    -- * __decoded_shape__: Shape vector, size `(2)`, of the decoded SparseTensor.
    +                    -- Values are: `[batch_size, max_decoded_length]`.
    +                    --
    +                    -- * __log_probability__: Matrix, size `(batch_size x 1)`, containing sequence
    +                    -- log-probabilities.
    +cTCGreedyDecoder inputs sequence_length | eqLengthGuard [] =
    +    buildOp (opDef "CTCGreedyDecoder")
    +        inputs sequence_length
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If True, merge repeated classes in output."
    +  name: "merge_repeated"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
    +  name: "inputs"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A vector containing sequence lengths, size `(batch_size)`."
    +  name: "sequence_length"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "Indices matrix, size `(total_decoded_outputs x 2)`,\nof a `SparseTensor<int64, 2>`.  The rows store: [batch, time]."
    +  name: "decoded_indices"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "Values vector, size: `(total_decoded_outputs)`,\nof a `SparseTensor<int64, 2>`.  The vector stores the decoded classes."
    +  name: "decoded_values"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "Shape vector, size `(2)`, of the decoded SparseTensor.\nValues are: `[batch_size, max_decoded_length]`."
    +  name: "decoded_shape"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "Matrix, size `(batch_size x 1)`, containing sequence\nlog-probabilities."
    +  name: "log_probability"
    +  type: DT_FLOAT
    +}
    +-}
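+
+-- A pure sketch of the decoding rule described above, for a single batch
+-- element; `ctcGreedyDecode` is a hypothetical helper, not one of the
+-- generated bindings.  Each frame is a list of per-class logits and the
+-- blank is the last class, index (numClasses - 1).  Following the example
+-- above, blanks do not separate repeats, so "A B B * B B" merges to "A B".
+-- (Ties in the argmax resolve toward the higher index in this sketch.)
+ctcGreedyDecode :: Bool -> [[Float]] -> [Int]
+ctcGreedyDecode mergeRepeated frames
+    | mergeRepeated = collapse emitted
+    | otherwise     = emitted
+  where
+    numClasses = length (head frames)
+    blank      = numClasses - 1
+    argmax xs  = snd (maximum (zip xs [(0 :: Int) ..]))
+    emitted    = filter (/= blank) (map argmax frames)
+    collapse (x : y : rest)
+        | x == y    = collapse (y : rest)
+        | otherwise = x : collapse (y : rest)
+    collapse xs = xs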
    +
    +-- | BatchToSpace for N-D tensors of type T.
    +--
    +-- This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
    +-- `block_shape + [batch]`, interleaves these blocks back into the grid defined by
    +-- the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
    +-- the input.  The spatial dimensions of this intermediate result are then
    +-- optionally cropped according to `crops` to produce the output.  This is the
    +-- reverse of SpaceToBatch.  See below for a precise description.
    +batchToSpaceND :: forall v1 v2 v3 t tblock_shape tcrops . (TensorType t,
    +                                                           TensorType tblock_shape,
    +                                                           OneOf '[Data.Int.Int32,
    +                                                                   Data.Int.Int64] tblock_shape,
    +                                                           TensorType tcrops,
    +                                                           OneOf '[Data.Int.Int32,
    +                                                                   Data.Int.Int64] tcrops) =>
    +                  Tensor v1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
    +                              -- where spatial_shape has M dimensions.
    +                  -> Tensor v2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1.
    +                  -> Tensor v3 tcrops -- ^ __crops__: 2-D with shape `[M, 2]`, all values must be >= 0.
    +                                      --   `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
    +                                      --   dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
    +                                      --   required that
    +                                      --   `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
    +                                      -- 
    +                                      -- This operation is equivalent to the following steps:
    +                                      -- 
    +                                      -- 1. Reshape `input` to `reshaped` of shape:
    +                                      --      [block_shape[0], ..., block_shape[M-1],
    +                                      --       batch / prod(block_shape),
    +                                      --       input_shape[1], ..., input_shape[N-1]]
    +                                      -- 
    +                                      -- 2. Permute dimensions of `reshaped` to produce `permuted` of shape
    +                                      --      [batch / prod(block_shape),
    +                                      -- 
    +                                      --       input_shape[1], block_shape[0],
    +                                      --       ...,
    +                                      --       input_shape[M], block_shape[M-1],
    +                                      -- 
    +                                      --       input_shape[M+1], ..., input_shape[N-1]]
    +                                      -- 
    +                                      -- 3. Reshape `permuted` to produce `reshaped_permuted` of shape
    +                                      --      [batch / prod(block_shape),
    +                                      -- 
    +                                      --       input_shape[1] * block_shape[0],
    +                                      --       ...,
    +                                      --       input_shape[M] * block_shape[M-1],
    +                                      -- 
    +                                      --       input_shape[M+1],
    +                                      --       ...,
    +                                      --       input_shape[N-1]]
    +                                      -- 
    +                                      -- 4. Crop the start and end of dimensions `[1, ..., M]` of
    +                                      --    `reshaped_permuted` according to `crops` to produce the output of shape:
    +                                      --      [batch / prod(block_shape),
    +                                      -- 
    +                                      --       input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
    +                                      --       ...,
    +                                      --       input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
    +                                      -- 
    +                                      --       input_shape[M+1], ..., input_shape[N-1]]
    +                                      -- 
    +                                      -- Some examples:
    +                                      -- 
    +                                      -- (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
    +                                      --     `crops = [[0, 0], [0, 0]]`:
    +                                      -- 
    +                                      -- ```prettyprint
    +                                      -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
    +                                      -- ```
    +                                      -- 
    +                                      -- The output tensor has shape `[1, 2, 2, 1]` and value:
    +                                      -- 
    +                                      -- ```prettyprint
    +                                      -- x = [[[[1], [2]], [[3], [4]]]]
    +                                      -- ```
    +                                      -- 
    +                                      -- (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
    +                                      --     `crops = [[0, 0], [0, 0]]`:
    +                                      -- 
    +                                      -- ```prettyprint
    +                                      -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
    +                                      -- ```
    +                                      -- 
    +                                      -- The output tensor has shape `[1, 2, 2, 3]` and value:
    +                                      -- 
    +                                      -- ```prettyprint
    +                                      -- x = [[[[1, 2, 3], [4, 5, 6]],
    +                                      --       [[7, 8, 9], [10, 11, 12]]]]
    +                                      -- ```
    +                                      -- 
    +                                      -- (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
    +                                      --     `crops = [[0, 0], [0, 0]]`:
    +                                      -- 
    +                                      -- ```prettyprint
+                                      -- x = [[[[1], [3]], [[9], [11]]],
    +                                      --      [[[2], [4]], [[10], [12]]],
    +                                      --      [[[5], [7]], [[13], [15]]],
    +                                      --      [[[6], [8]], [[14], [16]]]]
    +                                      -- ```
    +                                      -- 
    +                                      -- The output tensor has shape `[1, 4, 4, 1]` and value:
    +                                      -- 
    +                                      -- ```prettyprint
    +                                      -- x = [[[1],   [2],  [3],  [4]],
    +                                      --      [[5],   [6],  [7],  [8]],
    +                                      --      [[9],  [10], [11],  [12]],
    +                                      --      [[13], [14], [15],  [16]]]
    +                                      -- ```
    +                                      -- 
    +                                      -- (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
    +                                      --     `crops = [[0, 0], [2, 0]]`:
    +                                      -- 
    +                                      -- ```prettyprint
    +                                      -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
    +                                      --      [[[0], [2], [4]]], [[[0], [10], [12]]],
    +                                      --      [[[0], [5], [7]]], [[[0], [13], [15]]],
    +                                      --      [[[0], [6], [8]]], [[[0], [14], [16]]]]
    +                                      -- ```
    +                                      -- 
    +                                      -- The output tensor has shape `[2, 2, 4, 1]` and value:
    +                                      -- 
    +                                      -- ```prettyprint
    +                                      -- x = [[[[1],   [2],  [3],  [4]],
    +                                      --       [[5],   [6],  [7],  [8]]],
    +                                      --      [[[9],  [10], [11],  [12]],
    +                                      --       [[13], [14], [15],  [16]]]]
    +                                      -- ```
    +                  -> Tensor Value t -- ^ __output__
    +batchToSpaceND input block_shape crops | eqLengthGuard [] =
    +    buildOp (opDef "BatchToSpaceND"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tblock_shape" .~ tensorType (undefined :: tblock_shape)
    +             & opAttr "Tcrops" .~ tensorType (undefined :: tcrops))
    +        input block_shape crops
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tblock_shape"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tcrops"
    +  type: "type"
    +}
    +input_arg {
    +  description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has M dimensions."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D with shape `[M]`, all values must be >= 1."
    +  name: "block_shape"
    +  type_attr: "Tblock_shape"
    +}
    +input_arg {
    +  description: "2-D with shape `[M, 2]`, all values must be >= 0.\n  `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input\n  dimension `i + 1`, which corresponds to spatial dimension `i`.  It is\n  required that\n  `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n\nThis operation is equivalent to the following steps:\n\n1. Reshape `input` to `reshaped` of shape:\n     [block_shape[0], ..., block_shape[M-1],\n      batch / prod(block_shape),\n      input_shape[1], ..., input_shape[N-1]]\n\n2. Permute dimensions of `reshaped` to produce `permuted` of shape\n     [batch / prod(block_shape),\n\n      input_shape[1], block_shape[0],\n      ...,\n      input_shape[M], block_shape[M-1],\n\n      input_shape[M+1], ..., input_shape[N-1]]\n\n3. Reshape `permuted` to produce `reshaped_permuted` of shape\n     [batch / prod(block_shape),\n\n      input_shape[1] * block_shape[0],\n      ...,\n      input_shape[M] * block_shape[M-1],\n\n      input_shape[M+1],\n      ...,\n      input_shape[N-1]]\n\n4. Crop the start and end of dimensions `[1, ..., M]` of\n   `reshaped_permuted` according to `crops` to produce the output of shape:\n     [batch / prod(block_shape),\n\n      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],\n      ...,\n      input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],\n\n      input_shape[M+1], ..., input_shape[N-1]]\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```prettyprint\nx = [[[1],   [2],  [3],  [4]],\n     [[5],   [6],  [7],  [8]],\n     [[9],  [10], [11],  [12]],\n     [[13], [14], [15],  [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [2, 0]]`:\n\n```prettyprint\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n     [[[0], [2], [4]]], [[[0], [10], [12]]],\n     [[[0], [5], [7]]], [[[0], [13], [15]]],\n     [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```"
    +  name: "crops"
    +  type_attr: "Tcrops"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
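+
+-- The inverse rearrangement for one spatial dimension (M = 1) with no
+-- cropping, on nested lists; `batchToSpace1D` is a hypothetical helper that
+-- undoes `spaceToBatch1D blockW (0, 0)` from the sketch after SpaceToBatchND
+-- above.
+batchToSpace1D :: Int -> [[[a]]] -> [[[a]]]
+batchToSpace1D blockW slices =
+    [ interleave [ slices !! (off * outBatch + b) | off <- [0 .. blockW - 1] ]
+    | b <- [0 .. outBatch - 1] ]
+  where
+    outBatch = length slices `div` blockW
+    -- round-robin over the block offsets to rebuild each spatial row
+    interleave rows
+        | any null rows = []
+        | otherwise     = map head rows ++ interleave (map tail rows)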
    +
    +-- | Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
    +--
    +-- Packs the `N` tensors in `values` into a tensor with rank one higher than each
    +-- tensor in `values`, by packing them along the `axis` dimension.
    +-- Given a list of tensors of shape `(A, B, C)`;
    +-- 
    +-- if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
    +-- if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
    +-- Etc.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 'x' is [1, 4]
    +-- # 'y' is [2, 5]
    +-- # 'z' is [3, 6]
    +-- pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
    +-- pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
    +-- ```
    +-- 
    +-- This is the opposite of `unpack`.
    +pack :: forall v1 t . (TensorType t) =>
    +        [Tensor v1 t] -- ^ __values__: Must be of same shape and type.
    +        -> Tensor Value t -- ^ __output__: The packed tensor.
    +pack values | eqLengthGuard [("N", [("values", length values)])] =
    +    buildOp (opDef "Pack"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "N" .~ (fromIntegral (length values) :: Int64))
    +        values
    +{-
    +attr { has_minimum: true minimum: 1 name: "N" type: "int" }
    +attr { name: "T" type: "type" }
    +attr {
    +  default_value { i: 0 }
    +  description: "Dimension along which to pack.  Negative values wrap around, so the\nvalid range is `[-(R+1), R+1)`."
    +  name: "axis"
    +  type: "int"
    +}
    +input_arg {
    +  description: "Must be of same shape and type."
    +  name: "values"
    +  number_attr: "N"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The packed tensor." name: "output" type_attr: "T"
    +}
    +-}
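+
+-- On nested lists, the two packings in the example above are just the
+-- identity (axis 0) and transposition (axis 1); `packAxis0` and `packAxis1`
+-- are hypothetical helpers for illustration only.
+packAxis0 :: [[a]] -> [[a]]
+packAxis0 = id
+
+packAxis1 :: [[a]] -> [[a]]
+packAxis1 rows
+    | null rows || any null rows = []
+    | otherwise = map head rows : packAxis1 (map tail rows)
+
+-- packAxis1 [[1, 4], [2, 5], [3, 6]] == [[1, 2, 3], [4, 5, 6]]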
    +
    +-- | Returns a one-hot tensor.
    +--
    +-- The locations represented by indices in `indices` take value `on_value`,
    +-- while all other locations take value `off_value`.
    +-- 
+-- If the input `indices` is rank `N`, the output will have rank `N+1`.
+-- The new axis is created at dimension `axis` (by default the new axis is
+-- appended at the end).
    +-- 
    +-- If `indices` is a scalar the output shape will be a vector of length `depth`.
    +-- 
    +-- If `indices` is a vector of length `features`, the output shape will be:
    +-- ```
    +--   features x depth if axis == -1
    +--   depth x features if axis == 0
    +-- ```
    +-- 
    +-- If `indices` is a matrix (batch) with shape `[batch, features]`,
    +-- the output shape will be:
    +-- ```
    +--   batch x features x depth if axis == -1
    +--   batch x depth x features if axis == 1
    +--   depth x batch x features if axis == 0
    +-- ```
+-- 
+-- Examples
+-- ========
    +-- 
    +-- Suppose that
    +-- 
    +-- ```
    +--   indices = [0, 2, -1, 1]
    +--   depth = 3
    +--   on_value = 5.0
    +--   off_value = 0.0
    +--   axis = -1
    +-- ```
    +-- 
    +-- Then output is `[4 x 3]`:
    +-- 
    +--     ```output =
    +--       [5.0 0.0 0.0]  // one_hot(0)
    +--       [0.0 0.0 5.0]  // one_hot(2)
    +--       [0.0 0.0 0.0]  // one_hot(-1)
    +--       [0.0 5.0 0.0]  // one_hot(1)
    +--     ```
    +-- 
    +-- Suppose that
    +-- 
    +-- ```
    +--   indices = [0, 2, -1, 1]
    +--   depth = 3
    +--   on_value = 0.0
    +--   off_value = 3.0
    +--   axis = 0
    +-- ```
    +-- 
    +-- Then output is `[3 x 4]`:
    +-- 
    +--     ```output =
    +--       [0.0 3.0 3.0 3.0]
    +--       [3.0 3.0 3.0 0.0]
    +--       [3.0 3.0 3.0 3.0]
    +--       [3.0 0.0 3.0 3.0]
    +--     //  ^                one_hot(0)
    +--     //      ^            one_hot(2)
    +--     //          ^        one_hot(-1)
    +--     //              ^    one_hot(1)
    +--     ```
+-- 
+-- Suppose that
    +-- 
    +-- ```
    +--   indices = [[0, 2], [1, -1]]
    +--   depth = 3
    +--   on_value = 1.0
    +--   off_value = 0.0
    +--   axis = -1
    +-- ```
    +-- 
    +-- Then output is `[2 x 2 x 3]`:
    +-- 
    +--     ```output =
    +--       [
    +--         [1.0, 0.0, 0.0]  // one_hot(0)
    +--         [0.0, 0.0, 1.0]  // one_hot(2)
    +--       ][
    +--         [0.0, 1.0, 0.0]  // one_hot(1)
    +--         [0.0, 0.0, 0.0]  // one_hot(-1)
    +--       ]```
    +oneHot :: forall v1 v2 v3 v4 t tI . (TensorType t, TensorType tI,
    +                                     OneOf '[Data.Int.Int32, Data.Int.Int64,
    +                                             Data.Word.Word8] tI) =>
    +          Tensor v1 tI -- ^ __indices__: A tensor of indices.
    +          -> Tensor v2 Data.Int.Int32 -- ^ __depth__: A scalar defining the depth of the one hot dimension.
    +          -> Tensor v3 t -- ^ __on_value__: A scalar defining the value to fill in output when `indices[j] = i`.
    +          -> Tensor v4 t -- ^ __off_value__: A scalar defining the value to fill in output when `indices[j] != i`.
    +          -> Tensor Value t -- ^ __output__: The one-hot tensor.
    +oneHot indices depth on_value off_value | eqLengthGuard [] =
    +    buildOp (opDef "OneHot"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "TI" .~ tensorType (undefined :: tI))
    +        indices depth on_value off_value
    +{-
    +attr {
    +  default_value { i: -1 }
    +  description: "The axis to fill (default: -1, a new inner-most axis)."
    +  name: "axis"
    +  type: "int"
    +}
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values {
    +    list { type: DT_UINT8 type: DT_INT32 type: DT_INT64 }
    +  }
    +  default_value { type: DT_INT64 }
    +  name: "TI"
    +  type: "type"
    +}
    +input_arg {
    +  description: "A tensor of indices." name: "indices" type_attr: "TI"
    +}
    +input_arg {
    +  description: "A scalar defining the depth of the one hot dimension."
    +  name: "depth"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "A scalar defining the value to fill in output when `indices[j] = i`."
    +  name: "on_value"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A scalar defining the value to fill in output when `indices[j] != i`."
    +  name: "off_value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The one-hot tensor." name: "output" type_attr: "T"
    +}
    +-}
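+
+-- A sketch of the axis == -1 case above for a vector of indices;
+-- `oneHotRows` is a hypothetical helper, not one of the generated bindings.
+-- Out-of-range indices such as -1 yield a row of off-values.
+oneHotRows :: Int -> Float -> Float -> [Int] -> [[Float]]
+oneHotRows depth onValue offValue indices =
+    [ [ if j == i then onValue else offValue | j <- [0 .. depth - 1] ]
+    | i <- indices ]
+
+-- Reproducing the first example above:
+-- oneHotRows 3 5.0 0.0 [0, 2, -1, 1]
+--   == [[5,0,0], [0,0,5], [0,0,0], [0,5,0]]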
    +
    +-- | Return the reduction indices for computing gradients of s0 op s1 with broadcast.
    +--
    +-- This is typically used by gradient computations for a broadcasting operation.
    +broadcastGradientArgs :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32,
    +                                                                 Data.Int.Int64] t) =>
    +                         Tensor v1 t -- ^ __s0__
    +                         -> Tensor v2 t -- ^ __s1__
    +                         -> (Tensor Value t, Tensor Value t)
    +                         -- ^ (__r0__, __r1__)
    +                         --
    +                         -- * __r0__
    +                         --
    +                         -- * __r1__
    +broadcastGradientArgs s0 s1 | eqLengthGuard [] =
    +    buildOp (opDef "BroadcastGradientArgs"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        s0 s1
    +{-
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "s0" type_attr: "T" }
    +input_arg { name: "s1" type_attr: "T" }
    +output_arg { name: "r0" type_attr: "T" }
    +output_arg { name: "r1" type_attr: "T" }
    +-}
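+
+-- A plain-list sketch of the reduction indices this op produces;
+-- `broadcastGradientAxes` is a hypothetical helper for illustration only.
+-- Shapes are right-aligned, and each input is reduced over every output
+-- axis where its (rank-extended) dimension is 1 while the broadcast
+-- dimension is larger.  Degenerate cases may differ from the kernel in
+-- detail.
+broadcastGradientAxes :: [Int] -> [Int] -> ([Int], [Int])
+broadcastGradientAxes s0 s1 = (axesFor e0, axesFor e1)
+  where
+    rank     = max (length s0) (length s1)
+    extend s = replicate (rank - length s) 1 ++ s
+    e0       = extend s0
+    e1       = extend s1
+    out      = zipWith max e0 e1
+    axesFor e = [ i | (i, d, o) <- zip3 [0 ..] e out, d == 1, o > 1 ]
+
+-- broadcastGradientAxes [2, 3, 5] [1] == ([], [0, 1, 2])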
    +
    +-- | Returns a batched matrix tensor with new batched diagonal values.
    +--
    +-- Given `input` and `diagonal`, this operation returns a tensor with the
    +-- same shape and values as `input`, except for the diagonals of the innermost
    +-- matrices.  These will be overwritten by the values in `diagonal`.
    +-- The batched matrices must be square.
    +-- 
    +-- The output is computed as follows:
    +-- 
    +-- Assume `input` has `k+1` dimensions `[I, J, K, ..., N, N]` and `diagonal` has
    +-- `k` dimensions `[I, J, K, ..., N]`.  Then the output is a
+-- tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
    +-- 
    +--   * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
    +--   * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
    +matrixSetDiag :: forall v1 v2 t . (TensorType t) =>
    +                 Tensor v1 t -- ^ __input__: Rank `k+1`, where `k >= 1`.
    +                 -> Tensor v2 t -- ^ __diagonal__: Rank `k`, where `k >= 1`.
    +                 -> Tensor Value t -- ^ __output__: Rank `k+1`, with `output.shape = input.shape`.
    +matrixSetDiag input diagonal | eqLengthGuard [] =
    +    buildOp (opDef "MatrixSetDiag"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input diagonal
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "Rank `k+1`, where `k >= 1`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Rank `k`, where `k >= 1`."
    +  name: "diagonal"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Rank `k+1`, with `output.shape = input.shape`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
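+
+-- The k == 1 case of the op above on a single square matrix; `setDiag` is a
+-- hypothetical helper for illustration only.
+setDiag :: [[a]] -> [a] -> [[a]]
+setDiag rows diag =
+    [ [ if r == c then d else x | (c, x) <- zip [0 ..] row ]
+    | (r, (d, row)) <- zip [0 ..] (zip diag rows) ]
+
+-- setDiag [[1, 2], [3, 4]] [9, 8] == [[9, 2], [3, 8]]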
    +
    +-- | Update '*var' according to the RMSProp algorithm.
    +--
+-- Note that in the dense implementation of this algorithm, ms and mom will
+-- update even if grad is zero, but in the sparse implementation, ms and mom
+-- will not update in iterations during which grad is zero.
    +-- 
    +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
    +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
    +-- 
    +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
    +-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
    +-- var <- var - mom
    +applyRMSProp :: forall v1 v2 v3 v4 v5 v6 v7 v8 t . (TensorType t,
    +                                                    OneOf '[(Data.Complex.Complex Double),
    +                                                            (Data.Complex.Complex Float),
    +                                                            Data.Int.Int16,
    +                                                            Data.Int.Int32,
    +                                                            Data.Int.Int64,
    +                                                            Data.Int.Int8,
    +                                                            Data.Word.Word16,
    +                                                            Data.Word.Word8,
    +                                                            Double, Float] t) =>
    +                Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                -> Tensor v2 t -- ^ __ms__: Should be from a Variable().
    +                -> Tensor v3 t -- ^ __mom__: Should be from a Variable().
    +                -> Tensor v4 t -- ^ __lr__: Scaling factor. Must be a scalar.
    +                -> Tensor v5 t -- ^ __rho__: Decay rate. Must be a scalar.
    +                -> Tensor v6 t -- ^ __momentum__
    +                -> Tensor v7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
    +                -> Tensor v8 t -- ^ __grad__: The gradient.
    +                -> Tensor Value t -- ^ __out__: Same as "var".
    +applyRMSProp var ms mom lr rho momentum epsilon grad | eqLengthGuard [] =
    +    buildOp (opDef "ApplyRMSProp"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        var ms mom lr rho momentum epsilon grad
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "ms"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "mom"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Decay rate. Must be a scalar."
    +  name: "rho"
    +  type_attr: "T"
    +}
    +input_arg { name: "momentum" type_attr: "T" }
    +input_arg {
    +  description: "Ridge term. Must be a scalar."
    +  name: "epsilon"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
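+
+-- One scalar step of the update equations above, returning the new state
+-- instead of mutating variables; `rmsPropStep` is a hypothetical helper for
+-- illustration only.
+rmsPropStep :: Float  -- lr
+            -> Float  -- rho (decay)
+            -> Float  -- momentum
+            -> Float  -- epsilon
+            -> Float  -- grad
+            -> (Float, Float, Float)  -- (var, ms, mom)
+            -> (Float, Float, Float)
+rmsPropStep lr rho momentum epsilon grad (var, ms, mom) = (var', ms', mom')
+  where
+    ms'  = rho * ms + (1 - rho) * grad * grad
+    mom' = momentum * mom + lr * grad / sqrt (ms' + epsilon)
+    var' = var - mom'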
    +
    +-- | Returns a constant tensor.
    +
    +const :: forall dtype . (TensorType dtype) => Tensor Value dtype -- ^ __output__
    +const  | eqLengthGuard [] =
    +    buildOp (opDef "Const"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        
    +{-
    +attr {
    +  description: "Attr `value` is the tensor to return."
    +  name: "value"
    +  type: "tensor"
    +}
    +attr { name: "dtype" type: "type" }
    +output_arg { name: "output" type_attr: "dtype" }
    +-}
    +
    +-- | Creates or finds a child frame, and makes `data` available to the child frame.
    +--
    +-- This op is used together with `Exit` to create loops in the graph.
    +-- The unique `frame_name` is used by the `Executor` to identify frames. If
    +-- `is_constant` is true, `output` is a constant in the child frame; otherwise
    +-- it may be changed in the child frame. At most `parallel_iterations` iterations
    +-- are run in parallel in the child frame.
    +enter :: forall v1 t . (TensorType t) =>
    +         Tensor v1 t -- ^ __data__: The tensor to be made available to the child frame.
    +         -> Tensor Value t -- ^ __output__: The same tensor as `data`.
    +enter data' | eqLengthGuard [] =
    +    buildOp (opDef "Enter"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        data'
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "The name of the child frame."
    +  name: "frame_name"
    +  type: "string"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, the output is constant within the child frame."
    +  name: "is_constant"
    +  type: "bool"
    +}
    +attr {
    +  default_value { i: 10 }
    +  description: "The number of iterations allowed to run in parallel."
    +  name: "parallel_iterations"
    +  type: "int"
    +}
    +input_arg {
    +  description: "The tensor to be made available to the child frame."
    +  name: "data"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The same tensor as `data`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Debug Identity Op.
    +--
    +-- Provides an identity mapping of the non-Ref type input tensor for debugging.
    +debugIdentity :: forall v1 t . (TensorType t) =>
    +                 Tensor v1 t -- ^ __input__: Input tensor, non-Reference type.
    +                 -> Tensor Value t -- ^ __output__: Output tensor that equals the input tensor.
    +debugIdentity input | eqLengthGuard [] =
    +    buildOp (opDef "DebugIdentity"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  default_value { s: "" }
    +  description: "Name of the input tensor."
    +  name: "tensor_name"
    +  type: "string"
    +}
    +attr {
    +  default_value { list { } }
    +  description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc:://localhost:11011"
    +  name: "debug_urls"
    +  type: "list(string)"
    +}
    +input_arg {
    +  description: "Input tensor, non-Reference type."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Output tensor that equals the input tensor."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Debug NaN Value Counter Op
    +--
    +-- Counts number of NaNs in the input tensor, for debugging.
    +debugNanCount :: forall v1 t . (TensorType t) =>
    +                 Tensor v1 t -- ^ __input__: Input tensor, non-Reference type.
    +                 -> Tensor Value Data.Int.Int64 -- ^ __output__: An integer output tensor that is the number of NaNs in the input.
    +debugNanCount input | eqLengthGuard [] =
    +    buildOp (opDef "DebugNanCount"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  default_value { s: "" }
    +  description: "Name of the input tensor."
    +  name: "tensor_name"
    +  type: "string"
    +}
    +attr {
    +  default_value { list { } }
    +  description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc:://localhost:11011"
    +  name: "debug_urls"
    +  type: "list(string)"
    +}
    +input_arg {
    +  description: "Input tensor, non-Reference type."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "An integer output tensor that is the number of NaNs in the input."
    +  name: "output"
    +  type: DT_INT64
    +}
    +-}
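+
+-- The quantity this op reports, sketched over a flat list of values;
+-- `nanCount` is a hypothetical helper for illustration only.
+nanCount :: [Double] -> Int
+nanCount = length . filter isNaN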
    +
    +-- | Batch normalization.
    +--
    +-- This op is deprecated. Prefer `tf.nn.batch_normalization`.
    +batchNormWithGlobalNormalization :: forall v1 v2 v3 v4 v5 t . (TensorType t,
    +                                                               OneOf '[(Data.Complex.Complex Double),
    +                                                                       (Data.Complex.Complex Float),
    +                                                                       Data.Int.Int16,
    +                                                                       Data.Int.Int32,
    +                                                                       Data.Int.Int64,
    +                                                                       Data.Int.Int8,
    +                                                                       Data.Word.Word16,
    +                                                                       Data.Word.Word8,
    +                                                                       Double,
    +                                                                       Float] t) =>
+                                    Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulting tensor
    +                                         -- needs to be multiplied with gamma.
    +                                    -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
    +                                    -> Tensor v1 t -- ^ __t__: A 4D input Tensor.
    +                                    -> Tensor v2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
    +                                                   -- This is the first output from tf.nn.moments,
    +                                                   -- or a saved moving average thereof.
    +                                    -> Tensor v3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
    +                                                   -- This is the second output from tf.nn.moments,
    +                                                   -- or a saved moving average thereof.
    +                                    -> Tensor v4 t -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t.
    +                                                   -- An offset to be added to the normalized tensor.
    +                                    -> Tensor v5 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
    +                                                   -- If "scale_after_normalization" is true, this tensor will be multiplied
    +                                                   -- with the normalized tensor.
    +                                    -> Tensor Value t -- ^ __result__
    +batchNormWithGlobalNormalization scale_after_normalization variance_epsilon t m
    +                                 v beta gamma | eqLengthGuard [] =
    +    buildOp (opDef "BatchNormWithGlobalNormalization"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "scale_after_normalization" .~ scale_after_normalization
    +             & opAttr "variance_epsilon" .~ variance_epsilon)
    +        t m v beta gamma
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "A small float number to avoid dividing by 0."
    +  name: "variance_epsilon"
    +  type: "float"
    +}
    +attr {
    +  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
    +  name: "scale_after_normalization"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "A 4D input Tensor." name: "t" type_attr: "T"
    +}
    +input_arg {
    +  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
    +  name: "m"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
    +  name: "v"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor."
    +  name: "beta"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor."
    +  name: "gamma"
    +  type_attr: "T"
    +}
    +output_arg { name: "result" type_attr: "T" }
    +-}
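+
+-- As a rough sketch of the per-element computation this op performs (a
+-- hypothetical scalar analogue, not part of the generated API; assumes the
+-- usual batch-normalization formula):
+--
+-- ```prettyprint
+-- batchNorm1 :: Bool -> Float -> Float -> Float -> Float -> Float -> Float -> Float
+-- batchNorm1 scaleAfter eps x mean var beta gamma =
+--     let normalized = (x - mean) / sqrt (var + eps)
+--     in (if scaleAfter then normalized * gamma else normalized) + beta
+-- ```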
    +
    +-- | 
    +
    +batchMatrixDiag :: forall v1 t . (TensorType t) => Tensor v1 t -- ^ __diagonal__
    +                   -> Tensor Value t -- ^ __output__
    +batchMatrixDiag diagonal | eqLengthGuard [] =
    +    buildOp (opDef "BatchMatrixDiag"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        diagonal
    +{-
    +attr { name: "T" type: "type" }
    +input_arg { name: "diagonal" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
    +--
    +-- Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
    +-- For example, given a tensor of shape `(A, B, C, D)`;
    +-- 
    +-- If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
    +--   and each tensor in `output` will have shape `(B, C, D)`. (Note that the
    +--   dimension unpacked along is gone, unlike `split`).
    +-- 
    +-- If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
    +--   and each tensor in `output` will have shape `(A, C, D)`.
    +-- Etc.
    +-- 
    +-- This is the opposite of `pack`.
    +unpack :: forall v1 t . (TensorType t) => Data.Int.Int64 -- ^ __num__
    +          -> Tensor v1 t -- ^ __value__: 1-D or higher, with `axis` dimension size equal to `num`.
    +          -> [Tensor Value t] -- ^ __output__: The list of tensors unpacked from `value`.
    +unpack num value | eqLengthGuard [] =
    +    buildListOp [num] (opDef "Unpack"
    +                       & opAttr "T" .~ tensorType (undefined :: t)
    +                       & opAttr "num" .~ num)
    +        value
    +{-
    +attr { has_minimum: true name: "num" type: "int" }
    +attr { name: "T" type: "type" }
    +attr {
    +  default_value { i: 0 }
    +  description: "Dimension along which to unpack.  Negative values wrap around, so the\nvalid range is `[-R, R)`."
    +  name: "axis"
    +  type: "int"
    +}
    +input_arg {
    +  description: "1-D or higher, with `axis` dimension size equal to `num`."
    +  name: "value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The list of tensors unpacked from `value`."
    +  name: "output"
    +  number_attr: "num"
    +  type_attr: "T"
    +}
    +-}
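+
+-- A purely illustrative list-level sketch of the two cases above (hypothetical
+-- helpers, not part of the generated API; a rank-2 tensor is modeled as a
+-- list of rows):
+--
+-- ```prettyprint
+-- import Data.List (transpose)
+--
+-- unpackAxis0 :: [[a]] -> [[a]]
+-- unpackAxis0 = id               -- output[i] = value[i, :]
+--
+-- unpackAxis1 :: [[a]] -> [[a]]
+-- unpackAxis1 = transpose        -- output[i] = value[:, i]
+-- ```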
    +
    +-- | Split a `SparseTensor` into `num_split` tensors along one dimension.
    +--
+-- If `shape[split_dim]` is not an integer multiple of `num_split`, slices
+-- `[0 : shape[split_dim] % num_split]` get one extra dimension.
    +-- For example, if `split_dim = 1` and `num_split = 2` and the input is
    +-- 
    +--     input_tensor = shape = [2, 7]
    +--     [    a   d e  ]
    +--     [b c          ]
    +-- 
    +-- Graphically the output tensors are:
    +-- 
    +--     output_tensor[0] = shape = [2, 4]
    +--     [    a  ]
    +--     [b c    ]
    +-- 
    +--     output_tensor[1] = shape = [2, 3]
    +--     [ d e  ]
    +--     [      ]
    +sparseSplit :: forall v1 v2 v3 v4 t . (TensorType t) =>
    +               Data.Int.Int64 -- ^ __num_split__: The number of ways to split.
    +               -> Tensor v1 Data.Int.Int64 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
    +                                           -- `[0, rank(shape))`.
    +               -> Tensor v2 Data.Int.Int64 -- ^ __indices__: 2-D tensor represents the indices of the sparse tensor.
    +               -> Tensor v3 t -- ^ __values__: 1-D tensor represents the values of the sparse tensor.
+               -> Tensor v4 Data.Int.Int64 -- ^ __shape__: 1-D tensor represents the shape of the sparse tensor.
+               -> ([Tensor Value Data.Int.Int64], [Tensor Value t],
+                   [Tensor Value Data.Int.Int64])
+               -- ^ (__output_indices__, __output_values__, __output_shape__)
+               --
+               -- * __output_indices__: A list of 1-D tensors represents the indices of the output
+               -- sparse tensors.
    +               --
    +               -- * __output_values__: A list of 1-D tensors represents the values of the output sparse
    +               -- tensors.
    +               --
    +               -- * __output_shape__: A list of 1-D tensors represents the shape of the output sparse
    +               -- tensors.
    +sparseSplit num_split split_dim indices values shape | eqLengthGuard [] =
    +    buildListOp [num_split, num_split, num_split] (opDef "SparseSplit"
    +                                                   & opAttr "T" .~ tensorType (undefined :: t)
    +                                                   & opAttr "num_split" .~ num_split)
    +        split_dim indices values shape
    +{-
    +attr {
    +  description: "The number of ways to split."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_split"
    +  type: "int"
    +}
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "0-D.  The dimension along which to split.  Must be in the range\n`[0, rank(shape))`."
    +  name: "split_dim"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "2-D tensor represents the indices of the sparse tensor."
    +  name: "indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D tensor represents the values of the sparse tensor."
    +  name: "values"
    +  type_attr: "T"
    +}
+input_arg {
+  description: "1-D tensor represents the shape of the sparse tensor."
+  name: "shape"
+  type: DT_INT64
+}
+output_arg {
+  description: "A list of 1-D tensors represents the indices of the output\nsparse tensors."
+  name: "output_indices"
+  number_attr: "num_split"
+  type: DT_INT64
+}
    +output_arg {
    +  description: "A list of 1-D tensors represents the values of the output sparse\ntensors."
    +  name: "output_values"
    +  number_attr: "num_split"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "A list of 1-D tensors represents the shape of the output sparse\ntensors."
    +  name: "output_shape"
    +  number_attr: "num_split"
    +  type: DT_INT64
    +}
    +-}
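+
+-- The slice sizes described above can be sketched as follows (`splitSizes` is
+-- a hypothetical helper, not part of the generated API):
+--
+-- ```prettyprint
+-- splitSizes :: Int -> Int -> [Int]
+-- splitSizes dimSize numSplit =
+--     [ q + (if i < r then 1 else 0) | i <- [0 .. numSplit - 1] ]
+--   where (q, r) = dimSize `divMod` numSplit
+--
+-- -- splitSizes 7 2 == [4, 3], matching the `[2, 7]` example above.
+-- ```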
    +
    +-- | Pads a tensor with mirrored values.
    +--
+-- This operation pads `input` with mirrored values according to the `paddings`
    +-- you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
    +-- the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
    +-- how many values to add before the contents of `input` in that dimension, and
    +-- `paddings[D, 1]` indicates how many values to add after the contents of `input`
+-- in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
+-- than `input.dim_size(D)` if `mode` is `SYMMETRIC`, or `input.dim_size(D) - 1`
+-- if `mode` is `REFLECT`.
    +-- 
    +-- The padded size of each dimension D of the output is:
    +-- 
    +-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 't' is [[1, 2, 3], [4, 5, 6]].
+-- # 'paddings' is [[1, 1], [2, 2]].
    +-- # 'mode' is SYMMETRIC.
    +-- # rank of 't' is 2.
    +-- pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
    +--                       [2, 1, 1, 2, 3, 3, 2]
    +--                       [5, 4, 4, 5, 6, 6, 5]
    +--                       [5, 4, 4, 5, 6, 6, 5]]
    +-- ```
    +mirrorPad :: forall v1 v2 t tpaddings . (TensorType t, TensorType tpaddings,
    +                                         OneOf '[Data.Int.Int32,
    +                                                 Data.Int.Int64] tpaddings) =>
    +             Tensor v1 t -- ^ __input__: The input tensor to be padded.
    +             -> Tensor v2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
    +                                    -- rows must be the same as the rank of `input`.
    +             -> Tensor Value t -- ^ __output__: The padded tensor.
    +mirrorPad input paddings | eqLengthGuard [] =
    +    buildOp (opDef "MirrorPad"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings))
    +        input paddings
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tpaddings"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
    +  description: "Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions\ndo not include the borders, while in symmetric mode the padded regions\ndo include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`\nis `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and\nit is `[1, 2, 3, 3, 2]` in symmetric mode."
    +  name: "mode"
    +  type: "string"
    +}
    +input_arg {
    +  description: "The input tensor to be padded."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
    +  name: "paddings"
    +  type_attr: "Tpaddings"
    +}
    +output_arg {
    +  description: "The padded tensor." name: "output" type_attr: "T"
    +}
    +-}
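+
+-- A 1-D sketch of the two padding modes (`mirrorPad1D` is a hypothetical
+-- list-level analogue, not part of the generated API):
+--
+-- ```prettyprint
+-- data Mode = Reflect | Symmetric
+--
+-- mirrorPad1D :: Mode -> (Int, Int) -> [a] -> [a]
+-- mirrorPad1D mode (before, after) xs = pre ++ xs ++ post
+--   where
+--     body Reflect   = drop 1   -- REFLECT excludes the border element
+--     body Symmetric = id       -- SYMMETRIC includes the border element
+--     pre  = reverse (take before (body mode xs))
+--     post = take after (body mode (reverse xs))
+--
+-- -- mirrorPad1D Reflect   (0, 2) [1, 2, 3] == [1, 2, 3, 2, 1]
+-- -- mirrorPad1D Symmetric (0, 2) [1, 2, 3] == [1, 2, 3, 3, 2]
+-- ```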
    +
    +-- | 
    +
    +batchMatrixDiagPart :: forall v1 t . (TensorType t) =>
    +                       Tensor v1 t -- ^ __input__
    +                       -> Tensor Value t -- ^ __diagonal__
    +batchMatrixDiagPart input | eqLengthGuard [] =
    +    buildOp (opDef "BatchMatrixDiagPart"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "diagonal" type_attr: "T" }
    +-}
    +
    +-- | Computes gradient of the FractionalMaxPool function.
    +
    +fractionalMaxPoolGrad :: forall v1 v2 v3 v4 v5 t . (TensorType t,
    +                                                    OneOf '[Data.Int.Int32,
    +                                                            Data.Int.Int64,
    +                                                            Double, Float] t) =>
    +                         Tensor v1 t -- ^ __orig_input__: Original input for `fractional_max_pool`
    +                         -> Tensor v2 t -- ^ __orig_output__: Original output for `fractional_max_pool`
    +                         -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`.  Gradients
    +                                        -- w.r.t. the output of `fractional_max_pool`.
    +                         -> Tensor v4 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with
    +                                                     -- col_pooling_sequence.
    +                         -> Tensor v5 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with
    +                                                     -- row_pooling sequence.
    +                         -> Tensor Value t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `fractional_max_pool`.
    +fractionalMaxPoolGrad orig_input orig_output out_backprop row_pooling_sequence
    +                      col_pooling_sequence | eqLengthGuard [] =
    +    buildOp (opDef "FractionalMaxPoolGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        orig_input orig_output out_backprop row_pooling_sequence
    +        col_pooling_sequence
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling."
    +  name: "overlapping"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Original input for `fractional_max_pool`"
    +  name: "orig_input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Original output for `fractional_max_pool`"
    +  name: "orig_output"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`.  Gradients\nw.r.t. the output of `fractional_max_pool`."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "row pooling sequence, form pooling region with\ncol_pooling_sequence."
    +  name: "row_pooling_sequence"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "column pooling sequence, form pooling region with\nrow_pooling sequence."
    +  name: "col_pooling_sequence"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "4-D.  Gradients w.r.t. the input of `fractional_max_pool`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
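+
+-- The `overlapping` attribute in the dump above can be illustrated with a
+-- 1-D sketch of the forward pooling (hypothetical helper, not part of the
+-- generated API):
+--
+-- ```prettyprint
+-- fractionalMaxPool1D :: Ord a => Bool -> [Int] -> [a] -> [a]
+-- fractionalMaxPool1D overlapping boundaries vals =
+--     [ maximum (slice lo hi) | (lo, next) <- zip boundaries (tail boundaries)
+--                             , let hi = if overlapping then next else next - 1 ]
+--   where slice lo hi = take (hi - lo + 1) (drop lo vals)
+--
+-- -- fractionalMaxPool1D True [0, 2, 4] [20, 5, 16, 3, 7] == [20, 16]
+-- ```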
    +
    +-- | Returns the set of files matching a pattern.
    +--
    +-- Note that this routine only supports wildcard characters in the
    +-- basename portion of the pattern, not in the directory portion.
    +matchingFiles :: Tensor v1 Data.ByteString.ByteString -- ^ __pattern__: A (scalar) shell wildcard pattern.
    +                 -> Tensor Value Data.ByteString.ByteString -- ^ __filenames__: A vector of matching filenames.
    +matchingFiles pattern | eqLengthGuard [] =
    +    buildOp (opDef "MatchingFiles")
    +        pattern
    +{-
    +input_arg {
    +  description: "A (scalar) shell wildcard pattern."
    +  name: "pattern"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "A vector of matching filenames."
    +  name: "filenames"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Constructs a tensor by tiling a given tensor.
    +--
    +-- This operation creates a new tensor by replicating `input` `multiples` times.
    +-- The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
    +-- and the values of `input` are replicated `multiples[i]` times along the 'i'th
    +-- dimension. For example, tiling `[a b c d]` by `[2]` produces
    +-- `[a b c d a b c d]`.
    +tile :: forall v1 v2 t tmultiples . (TensorType t, TensorType tmultiples,
    +                                     OneOf '[Data.Int.Int32,
    +                                             Data.Int.Int64] tmultiples) =>
    +        Tensor v1 t -- ^ __input__: 1-D or higher.
    +        -> Tensor v2 tmultiples -- ^ __multiples__: 1-D. Length must be the same as the number of dimensions in `input`
    +        -> Tensor Value t -- ^ __output__
    +tile input multiples | eqLengthGuard [] =
    +    buildOp (opDef "Tile"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tmultiples" .~ tensorType (undefined :: tmultiples))
    +        input multiples
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tmultiples"
    +  type: "type"
    +}
    +input_arg {
    +  description: "1-D or higher." name: "input" type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D. Length must be the same as the number of dimensions in `input`"
    +  name: "multiples"
    +  type_attr: "Tmultiples"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
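+
+-- A 1-D sketch of the tiling described above (`tile1D` is a hypothetical
+-- list-level analogue, not part of the generated API):
+--
+-- ```prettyprint
+-- tile1D :: Int -> [a] -> [a]
+-- tile1D n xs = concat (replicate n xs)
+--
+-- -- tile1D 2 "abcd" == "abcdabcd", i.e. tiling `[a b c d]` by `[2]`.
+-- ```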
    +
    +-- | Returns the element-wise min of two SparseTensors.
    +--
    +-- Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
    +sparseSparseMinimum :: forall v1 v2 v3 v4 v5 v6 t . (TensorType t,
    +                                                     OneOf '[(Data.Complex.Complex Double),
    +                                                             (Data.Complex.Complex Float),
    +                                                             Data.Int.Int16,
    +                                                             Data.Int.Int32,
    +                                                             Data.Int.Int64,
    +                                                             Data.Int.Int8,
    +                                                             Data.Word.Word16,
    +                                                             Data.Word.Word8,
    +                                                             Double,
    +                                                             Float] t) =>
    +                       Tensor v1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
    +                                                -- SparseTensor, in the canonical lexicographic ordering.
    +                       -> Tensor v2 t -- ^ __a_values__: 1-D.  `N` non-empty values corresponding to `a_indices`.
    +                       -> Tensor v3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  Shape of the input SparseTensor.
    +                       -> Tensor v4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand.
    +                       -> Tensor v5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype.
    +                       -> Tensor v6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal.
    +                       -> (Tensor Value Data.Int.Int64, Tensor Value t)
    +                       -- ^ (__output_indices__, __output_values__)
    +                       --
    +                       -- * __output_indices__: 2-D.  The indices of the output SparseTensor.
    +                       --
    +                       -- * __output_values__: 1-D.  The values of the output SparseTensor.
    +sparseSparseMinimum a_indices a_values a_shape b_indices b_values
    +                    b_shape | eqLengthGuard [] =
    +    buildOp (opDef "SparseSparseMinimum"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        a_indices a_values a_shape b_indices b_values b_shape
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering."
    +  name: "a_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `N` non-empty values corresponding to `a_indices`."
    +  name: "a_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the input SparseTensor."
    +  name: "a_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "counterpart to `a_indices` for the other operand."
    +  name: "b_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "counterpart to `a_values` for the other operand; must be of the same dtype."
    +  name: "b_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "counterpart to `a_shape` for the other operand; the two shapes must be equal."
    +  name: "b_shape"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "2-D.  The indices of the output SparseTensor."
    +  name: "output_indices"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "1-D.  The values of the output SparseTensor."
    +  name: "output_values"
    +  type_attr: "T"
    +}
    +-}
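+
+-- Since missing entries of a SparseTensor are implicitly zero, the
+-- element-wise minimum can be sketched over maps from index to value
+-- (hypothetical helper, not part of the generated API):
+--
+-- ```prettyprint
+-- import qualified Data.Map as Map
+--
+-- sparseMin :: (Ord k, Ord a, Num a) => Map.Map k a -> Map.Map k a -> Map.Map k a
+-- sparseMin a b = Map.fromList
+--     [ (k, min (get k a) (get k b)) | k <- Map.keys (Map.union a b) ]
+--   where get k m = Map.findWithDefault 0 k m
+-- ```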
    +
    +-- | Generates labels for candidate sampling with a learned unigram distribution.
    +--
    +-- See explanations of candidate sampling and the data formats at
    +-- go/candidate-sampling.
    +-- 
    +-- For each batch, this op picks a single set of sampled candidate labels.
    +-- 
    +-- The advantages of sampling candidates per-batch are simplicity and the
    +-- possibility of efficient dense matrix multiplication. The disadvantage is that
    +-- the sampled candidates must be chosen independently of the context and of the
    +-- true labels.
    +allCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to produce per batch.
    +                       -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
    +                       -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
    +                               -- candidates in a batch are unique. This requires some approximation to
    +                               -- estimate the post-rejection sampling probabilities.
    +                       -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
    +                                                   -- IDs of the num_true target_classes in the corresponding original label.
    +                       -> (Tensor Value Data.Int.Int64, Tensor Value Float,
    +                           Tensor Value Float)
    +                       -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
    +                       --
    +                       -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
    +                       -- the ID of a sampled candidate.
    +                       --
    +                       -- * __true_expected_count__: A batch_size * num_true matrix, representing
    +                       -- the number of times each candidate is expected to occur in a batch
    +                       -- of sampled candidates. If unique=true, then this is a probability.
    +                       --
    +                       -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
    +                       -- candidate representing the number of times the candidate is expected
    +                       -- to occur in a batch of sampled candidates.  If unique=true, then this is a
    +                       -- probability.
    +allCandidateSampler num_sampled num_true unique
    +                    true_classes | eqLengthGuard [] =
    +    buildOp (opDef "AllCandidateSampler"
    +             & opAttr "num_sampled" .~ num_sampled
    +             & opAttr "num_true" .~ num_true
    +             & opAttr "unique" .~ unique)
    +        true_classes
    +{-
    +attr {
    +  description: "Number of true labels per context."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_true"
    +  type: "int"
    +}
    +attr {
    +  description: "Number of candidates to produce per batch."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_sampled"
    +  type: "int"
    +}
    +attr {
    +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
    +  name: "unique"
    +  type: "bool"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "An second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +input_arg {
    +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
    +  name: "true_classes"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
    +  name: "sampled_candidates"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
    +  name: "true_expected_count"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
    +  name: "sampled_expected_count"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Forwards the ref tensor `data` to the output port determined by `pred`.
    +--
    +-- If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
    +-- the data goes to `output_false`.
    +-- 
    +-- See also `Switch` and `Merge`.
    +refSwitch :: forall v1 v2 t . (TensorType t) =>
    +             Tensor v1 t -- ^ __data__: The ref tensor to be forwarded to the appropriate output.
    +             -> Tensor v2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data.
    +             -> (Tensor Value t, Tensor Value t)
    +             -- ^ (__output_false__, __output_true__)
    +             --
    +             -- * __output_false__: If `pred` is false, data will be forwarded to this output.
    +             --
    +             -- * __output_true__: If `pred` is true, data will be forwarded to this output.
    +refSwitch data' pred | eqLengthGuard [] =
    +    buildOp (opDef "RefSwitch"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        data' pred
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The ref tensor to be forwarded to the appropriate output."
    +  is_ref: true
    +  name: "data"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A scalar that specifies which output port will receive data."
    +  name: "pred"
    +  type: DT_BOOL
    +}
    +output_arg {
    +  description: "If `pred` is false, data will be forwarded to this output."
    +  is_ref: true
    +  name: "output_false"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "If `pred` is true, data will be forwarded to this output."
    +  is_ref: true
    +  name: "output_true"
    +  type_attr: "T"
    +}
    +-}
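+
+-- The forwarding rule can be sketched with `Maybe` standing in for "no
+-- tensor emitted on this port" (hypothetical analogue, not part of the
+-- generated API):
+--
+-- ```prettyprint
+-- switch1 :: Bool -> a -> (Maybe a, Maybe a)   -- (output_false, output_true)
+-- switch1 p x = if p then (Nothing, Just x) else (Just x, Nothing)
+-- ```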
    +
    +-- | Merges summaries.
    +--
    +-- This op creates a
    +-- [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
    +-- protocol buffer that contains the union of all the values in the input
    +-- summaries.
    +-- 
    +-- When the Op is run, it reports an `InvalidArgument` error if multiple values
    +-- in the summaries to merge use the same tag.
    +mergeSummary :: [Tensor v1 Data.ByteString.ByteString] -- ^ __inputs__: Can be of any shape.  Each must contain serialized `Summary` protocol
    +                                                       -- buffers.
    +                -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
    +mergeSummary inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
    +    buildOp (opDef "MergeSummary"
    +             & opAttr "N" .~ (fromIntegral (length inputs) :: Int64))
    +        inputs
    +{-
    +attr { has_minimum: true minimum: 1 name: "N" type: "int" }
    +input_arg {
    +  description: "Can be of any shape.  Each must contain serialized `Summary` protocol\nbuffers."
    +  name: "inputs"
    +  number_attr: "N"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "Scalar. Serialized `Summary` protocol buffer."
    +  name: "summary"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Returns the truth value of NOT x element-wise.
    +
    +logicalNot :: Tensor v1 Bool -- ^ __x__
    +              -> Tensor Value Bool -- ^ __y__
    +logicalNot x | eqLengthGuard [] =
    +    buildOp (opDef "LogicalNot")
    +        x
    +{-
    +input_arg { name: "x" type: DT_BOOL }
    +output_arg { name: "y" type: DT_BOOL }
    +-}
    +
    +-- | Gradients for Local Response Normalization.
    +
    +lRNGrad :: forall v1 v2 v3 t . (TensorType t, OneOf '[Data.Word.Word16,
    +                                                      Float] t) =>
    +           Tensor v1 t -- ^ __input_grads__: 4-D with shape `[batch, height, width, channels]`.
    +           -> Tensor v2 t -- ^ __input_image__: 4-D with shape `[batch, height, width, channels]`.
    +           -> Tensor v3 t -- ^ __output_image__: 4-D with shape `[batch, height, width, channels]`.
    +           -> Tensor Value t -- ^ __output__: The gradients for LRN.
    +lRNGrad input_grads input_image output_image | eqLengthGuard [] =
    +    buildOp (opDef "LRNGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input_grads input_image output_image
    +{-
    +attr {
    +  default_value { i: 5 }
    +  description: "A depth radius."
    +  name: "depth_radius"
    +  type: "int"
    +}
    +attr {
    +  default_value { f: 1.0 }
    +  description: "An offset (usually > 0 to avoid dividing by 0)."
    +  name: "bias"
    +  type: "float"
    +}
    +attr {
    +  default_value { f: 1.0 }
    +  description: "A scale factor, usually positive."
    +  name: "alpha"
    +  type: "float"
    +}
    +attr {
    +  default_value { f: 0.5 }
    +  description: "An exponent."
    +  name: "beta"
    +  type: "float"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "input_grads"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "input_image"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "output_image"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The gradients for LRN." name: "output" type_attr: "T"
    +}
    +-}
    +
    +-- | Converts each string in the input Tensor to the specified numeric type.
    +--
    +-- (Note that int32 overflow results in an error while float overflow
    +-- results in a rounded value.)
    +stringToNumber :: forall v1 out_type . (TensorType out_type,
    +                                        OneOf '[Data.Int.Int32,
    +                                                Float] out_type) =>
    +                  Tensor v1 Data.ByteString.ByteString -- ^ __string_tensor__
    +                  -> Tensor Value out_type -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
    +stringToNumber string_tensor | eqLengthGuard [] =
    +    buildOp (opDef "StringToNumber"
    +             & opAttr "out_type" .~ tensorType (undefined :: out_type))
    +        string_tensor
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_INT32 } }
    +  default_value { type: DT_FLOAT }
    +  description: "The numeric type to interpret each string in string_tensor as."
    +  name: "out_type"
    +  type: "type"
    +}
    +input_arg { name: "string_tensor" type: DT_STRING }
    +output_arg {
    +  description: "A Tensor of the same shape as the input `string_tensor`."
    +  name: "output"
    +  type_attr: "out_type"
    +}
    +-}
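+
+-- A plain-Haskell sketch of the per-string conversion (hypothetical helper,
+-- not part of the generated API; `reads` stands in for the op's parser):
+--
+-- ```prettyprint
+-- stringToFloat :: String -> Maybe Float
+-- stringToFloat s = case reads s of
+--     [(x, "")] -> Just x
+--     _         -> Nothing    -- the op raises an error instead
+-- ```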
    +
    +-- | Multiply matrix "a" by matrix "b".
    +--
    +-- The inputs must be two-dimensional matrices and the inner dimension of "a" must
    +-- match the outer dimension of "b". This op is optimized for the case where at
    +-- least one of "a" or "b" is sparse. The breakeven for using this versus a dense
    +-- matrix multiply on one platform was 30% zero values in the sparse matrix.
    +sparseMatMul :: forall v1 v2 ta tb . (TensorType ta, OneOf '[Data.Word.Word16,
    +                                                             Float] ta,
    +                                      TensorType tb, OneOf '[Data.Word.Word16,
    +                                                             Float] tb) =>
    +                Tensor v1 ta -- ^ __a__
    +                -> Tensor v2 tb -- ^ __b__
    +                -> Tensor Value Float -- ^ __product__
    +sparseMatMul a b | eqLengthGuard [] =
    +    buildOp (opDef "SparseMatMul"
    +             & opAttr "Ta" .~ tensorType (undefined :: ta)
    +             & opAttr "Tb" .~ tensorType (undefined :: tb))
    +        a b
    +{-
    +attr {
    +  default_value { b: false } name: "transpose_a" type: "bool"
    +}
    +attr {
    +  default_value { b: false } name: "transpose_b" type: "bool"
    +}
    +attr {
    +  default_value { b: false } name: "a_is_sparse" type: "bool"
    +}
    +attr {
    +  default_value { b: false } name: "b_is_sparse" type: "bool"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } }
    +  default_value { type: DT_FLOAT }
    +  name: "Ta"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } }
    +  default_value { type: DT_FLOAT }
    +  name: "Tb"
    +  type: "type"
    +}
    +input_arg { name: "a" type_attr: "Ta" }
    +input_arg { name: "b" type_attr: "Tb" }
    +output_arg { name: "product" type: DT_FLOAT }
    +-}
    +
    +-- | Forwards the value of an available tensor from `inputs` to `output`.
    +--
    +-- `Merge` waits for at least one of the tensors in `inputs` to become available.
    +-- It is usually combined with `Switch` to implement branching.
    +-- 
+-- `Merge` forwards the first tensor to become available to `output`, and sets
    +-- `value_index` to its index in `inputs`.
    +merge :: forall v1 t . (TensorType t) =>
    +         [Tensor v1 t] -- ^ __inputs__: The input tensors, exactly one of which will become available.
    +         -> (Tensor Value t, Tensor Value Data.Int.Int32)
    +         -- ^ (__output__, __value_index__)
    +         --
    +         -- * __output__: Will be set to the available input tensor.
    +         --
    +         -- * __value_index__: The index of the chosen input tensor in `inputs`.
    +merge inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
    +    buildOp (opDef "Merge"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "N" .~ (fromIntegral (length inputs) :: Int64))
    +        inputs
    +{-
    +attr { name: "T" type: "type" }
    +attr { has_minimum: true minimum: 1 name: "N" type: "int" }
    +input_arg {
    +  description: "The input tensors, exactly one of which will become available."
    +  name: "inputs"
    +  number_attr: "N"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Will be set to the available input tensor."
    +  name: "output"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The index of the chosen input tensor in `inputs`."
    +  name: "value_index"
    +  type: DT_INT32
    +}
    +-}
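+
+-- The selection rule can be sketched as picking the first available input
+-- together with its index (hypothetical analogue, not part of the generated
+-- API; `Maybe` models availability):
+--
+-- ```prettyprint
+-- import Data.Maybe (listToMaybe)
+--
+-- mergeFirst :: [Maybe a] -> Maybe (a, Int)   -- (output, value_index)
+-- mergeFirst xs = listToMaybe [ (x, i) | (Just x, i) <- zip xs [0 ..] ]
+-- ```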
    +
    +-- | Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
    +--
    +-- For an explanation see "Differentiation of the Cholesky algorithm" by
    +-- Iain Murray http://arxiv.org/abs/1602.07527.
    +choleskyGrad :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                Tensor v1 t -- ^ __l__: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
    +                            -- Algorithm depends only on lower triangular part of the innermost matrices of
    +                            -- this tensor.
    +                -> Tensor v2 t -- ^ __grad__: df/dl where f is some scalar function. Shape is `[..., M, M]`.
    +                               -- Algorithm depends only on lower triangular part of the innermost matrices of
    +                               -- this tensor.
    +                -> Tensor Value t -- ^ __output__: Symmetrized version of df/dA . Shape is `[..., M, M]`
    +choleskyGrad l grad | eqLengthGuard [] =
    +    buildOp (opDef "CholeskyGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        l grad
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor."
    +  name: "l"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "df/dl where f is some scalar function. Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor."
    +  name: "grad"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Symmetrized version of df/dA . Shape is `[..., M, M]`"
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | 
    +
    +batchCholeskyGrad :: forall v1 v2 t . (TensorType t, OneOf '[Double,
    +                                                             Float] t) =>
    +                     Tensor v1 t -- ^ __l__
    +                     -> Tensor v2 t -- ^ __grad__
    +                     -> Tensor Value t -- ^ __output__
    +batchCholeskyGrad l grad | eqLengthGuard [] =
    +    buildOp (opDef "BatchCholeskyGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        l grad
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "l" type_attr: "T" }
    +input_arg { name: "grad" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Gather specific elements from the TensorArray into output `value`.
    +--
    +-- All elements selected by `indices` must have the same shape.
    +tensorArrayGather :: forall v1 v2 v3 dtype . (TensorType dtype) =>
    +                     Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
    +                     -> Tensor v2 Data.Int.Int32 -- ^ __indices__: The locations in the TensorArray from which to read tensor elements.
    +                     -> Tensor v3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
    +                     -> Tensor Value dtype -- ^ __value__: All of the elements in the TensorArray, concatenated along a new
    +                     -- axis (the new dimension 0).
    +tensorArrayGather handle indices flow_in | eqLengthGuard [] =
    +    buildOp (opDef "TensorArrayGather"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        handle indices flow_in
    +{-
    +attr {
    +  description: "The type of the elem that is returned."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  default_value { shape { unknown_rank: true } }
    +  description: "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error."
    +  name: "element_shape"
    +  type: "shape"
    +}
    +input_arg {
    +  description: "The handle to a TensorArray."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "The locations in the TensorArray from which to read tensor elements."
    +  name: "indices"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_in"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "All of the elements in the TensorArray, concatenated along a new\naxis (the new dimension 0)."
    +  name: "value"
    +  type_attr: "dtype"
    +}
    +-}
    +
    +-- | Resize `images` to `size` using nearest neighbor interpolation.
    +
    +resizeNearestNeighbor :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                                 Data.Int.Int32,
    +                                                                 Data.Int.Int64,
    +                                                                 Data.Int.Int8,
    +                                                                 Data.Word.Word16,
    +                                                                 Data.Word.Word8,
    +                                                                 Double,
    +                                                                 Float] t) =>
    +                         Tensor v1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
    +                         -> Tensor v2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
    +                                                     -- new size for the images.
    +                         -> Tensor Value t -- ^ __resized_images__: 4-D with shape
    +                         -- `[batch, new_height, new_width, channels]`.
    +resizeNearestNeighbor images size | eqLengthGuard [] =
    +    buildOp (opDef "ResizeNearestNeighbor"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        images size
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
    +  name: "align_corners"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "images"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
    +  name: "size"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
    +  name: "resized_images"
    +  type_attr: "T"
    +}
    +-}
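+
+-- The `align_corners` attribute changes the scale used to map output indices
+-- back to input indices; a hedged 1-D sketch (hypothetical helper, not part
+-- of the generated API, and one of several possible rounding conventions):
+--
+-- ```prettyprint
+-- sourceIndex :: Bool -> Int -> Int -> Int -> Int
+-- sourceIndex alignCorners inSize outSize i =
+--     min (inSize - 1) (floor (fromIntegral i * scale))
+--   where
+--     scale :: Double
+--     scale | alignCorners && outSize > 1 =
+--               fromIntegral (inSize - 1) / fromIntegral (outSize - 1)
+--           | otherwise = fromIntegral inSize / fromIntegral outSize
+-- ```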
    +
    +-- | Training via negative sampling.
    +
    +negTrain :: Data.Int.Int64 -- ^ __num_negative_samples__: Number of negative samples per example.
    +            -> Tensor v1 Float -- ^ __w_in__: input word embedding.
    +            -> Tensor v2 Float -- ^ __w_out__: output word embedding.
    +            -> Tensor v3 Data.Int.Int32 -- ^ __examples__: A vector of word ids.
    +            -> Tensor v4 Data.Int.Int32 -- ^ __labels__: A vector of word ids.
    +            -> Tensor v5 Float -- ^ __lr__
    +            -> ControlNode
    +negTrain num_negative_samples w_in w_out examples labels lr | eqLengthGuard [] =
    +    buildOp (opDef "NegTrain"
    +             & opAttr "num_negative_samples" .~ num_negative_samples)
    +        w_in w_out examples labels lr
    +{-
    +attr {
    +  description: "Count of words in the vocabulary."
    +  name: "vocab_count"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "Number of negative samples per example."
    +  name: "num_negative_samples"
    +  type: "int"
    +}
    +input_arg {
    +  description: "input word embedding."
    +  is_ref: true
    +  name: "w_in"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "output word embedding."
    +  is_ref: true
    +  name: "w_out"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A vector of word ids."
    +  name: "examples"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "A vector of word ids." name: "labels" type: DT_INT32
    +}
    +input_arg { name: "lr" type: DT_FLOAT }
    +-}
    +
    +-- | Creates a TensorArray for storing the gradients of values in the given handle.
    +--
    +-- If the given TensorArray gradient already exists, returns a reference to it.
    +-- 
    +-- Locks the size of the original TensorArray by disabling its dynamic size flag.
    +-- 
    +-- **A note about the input flow_in:**
    +-- 
    +-- The handle flow_in forces the execution of the gradient lookup to occur
    +-- only after certain other operations have occurred.  For example, when
    +-- the forward TensorArray is dynamically sized, writes to this TensorArray
    +-- may resize the object.  The gradient TensorArray is statically sized based
    +-- on the size of the forward TensorArray when this operation executes.
    +-- Furthermore, the size of the forward TensorArray is frozen by this call.
    +-- As a result, the flow is used to ensure that the call to generate the gradient
    +-- TensorArray only happens after all writes are executed.
    +-- 
    +-- In the case of dynamically sized TensorArrays, gradient computation should
    +-- only be performed on read operations that have themselves been chained via
    +-- flow to occur only after all writes have executed. That way the final size
    +-- of the forward TensorArray is known when this operation is called.
    +-- 
    +-- **A note about the source attribute:**
    +-- 
    +-- TensorArray gradient calls use an accumulator TensorArray object.  If
    +-- multiple gradients are calculated and run in the same session, the multiple
+-- gradient nodes may accidentally flow through the same accumulator TensorArray.
    +-- This double counts and generally breaks the TensorArray gradient flow.
    +-- 
    +-- The solution is to identify which gradient call this particular
    +-- TensorArray gradient is being called in.  This is performed by identifying
    +-- a unique string (e.g. "gradients", "gradients_1", ...) from the input
    +-- gradient Tensor's name.  This string is used as a suffix when creating
    +-- the TensorArray gradient object here (the attribute `source`).
    +-- 
    +-- The attribute `source` is added as a suffix to the forward TensorArray's
    +-- name when performing the creation / lookup, so that each separate gradient
    +-- calculation gets its own TensorArray accumulator.
    +tensorArrayGrad :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to the forward TensorArray.
    +                   -> Tensor v2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
    +                   -> Tensor Value Data.ByteString.ByteString -- ^ __grad_handle__
    +tensorArrayGrad handle flow_in | eqLengthGuard [] =
    +    buildOp (opDef "TensorArrayGrad")
    +        handle flow_in
    +{-
    +attr {
    +  description: "The gradient source string, used to decide which gradient TensorArray\nto return."
    +  name: "source"
    +  type: "string"
    +}
    +input_arg {
    +  description: "The handle to the forward TensorArray."
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "A float scalar that enforces proper chaining of operations."
    +  name: "flow_in"
    +  type: DT_FLOAT
    +}
    +output_arg { is_ref: true name: "grad_handle" type: DT_STRING }
    +-}
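+
+-- A sketch of the accumulator naming described above (the exact separator is
+-- an assumption; `gradArrayName` is hypothetical, not part of the generated
+-- API):
+--
+-- ```prettyprint
+-- gradArrayName :: String -> String -> String
+-- gradArrayName forwardName source = forwardName ++ "@" ++ source
+--
+-- -- e.g. gradArrayName "my_array" "gradients_1" == "my_array@gradients_1"
+-- ```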
    +
    +-- | Outputs a `Summary` protocol buffer with audio.
    +--
    +-- The summary has up to `max_outputs` summary values containing audio. The
    +-- audio is built from `tensor` which must be 3-D with shape `[batch_size,
    +-- frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
    +-- assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
    +-- 
    +-- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
    +-- build the `tag` of the summary values:
    +-- 
    +-- *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
    +-- *  If `max_outputs` is greater than 1, the summary value tags are
    +--    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
    +audioSummary :: Float -- ^ __sample_rate__: The sample rate of the signal in hertz.
    +                -> Tensor v1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
    +                -> Tensor v2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`.
    +                -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
    +audioSummary sample_rate tag tensor | eqLengthGuard [] =
    +    buildOp (opDef "AudioSummary"
    +             & opAttr "sample_rate" .~ sample_rate)
    +        tag tensor
    +{-
    +attr {
    +  description: "The sample rate of the signal in hertz."
    +  name: "sample_rate"
    +  type: "float"
    +}
    +attr {
    +  default_value { i: 3 }
    +  description: "Max number of batch elements to generate audio for."
    +  has_minimum: true
    +  minimum: 1
    +  name: "max_outputs"
    +  type: "int"
    +}
    +input_arg {
    +  description: "Scalar. Used to build the `tag` attribute of the summary values."
    +  name: "tag"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "2-D of shape `[batch_size, frames]`."
    +  name: "tensor"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "Scalar. Serialized `Summary` protocol buffer."
    +  name: "summary"
    +  type: DT_STRING
    +}
    +-}
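+
+-- The tag-generation rule above can be sketched directly (hypothetical
+-- helper, not part of the generated API):
+--
+-- ```prettyprint
+-- audioTags :: String -> Int -> [String]
+-- audioTags tag maxOutputs
+--     | maxOutputs == 1 = [tag ++ "/audio"]
+--     | otherwise       = [ tag ++ "/audio/" ++ show i | i <- [0 .. maxOutputs - 1] ]
+-- ```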
    +
    +-- | Does nothing. Only useful as a placeholder for control edges.
    +
    +noOp :: ControlNode
    +noOp  | eqLengthGuard [] =
    +    buildOp (opDef "NoOp")
    +        
    +{-
    +
    +-}
    +
    +-- | Makes its input available to the next iteration.
    +
    +nextIteration :: forall v1 t . (TensorType t) =>
    +                 Tensor v1 t -- ^ __data__: The tensor to be made available to the next iteration.
    +                 -> Tensor Value t -- ^ __output__: The same tensor as `data`.
    +nextIteration data' | eqLengthGuard [] =
    +    buildOp (opDef "NextIteration"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        data'
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The tensor to be made available to the next iteration."
    +  name: "data"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The same tensor as `data`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes softplus gradients for a softplus operation.
    +
    +softplusGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                        Data.Int.Int32,
    +                                                        Data.Int.Int64,
    +                                                        Data.Int.Int8,
    +                                                        Data.Word.Word16,
    +                                                        Data.Word.Word8, Double,
    +                                                        Float] t) =>
    +                Tensor v1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softplus operation.
    +                -> Tensor v2 t -- ^ __features__: The features passed as input to the corresponding softplus operation.
    +                -> Tensor Value t -- ^ __backprops__: The gradients: `gradients / (1 + exp(-features))`.
    +softplusGrad gradients features | eqLengthGuard [] =
    +    buildOp (opDef "SoftplusGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        gradients features
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The backpropagated gradients to the corresponding softplus operation."
    +  name: "gradients"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The features passed as input to the corresponding softplus operation."
    +  name: "features"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The gradients: `gradients / (1 + exp(-features))`."
    +  name: "backprops"
    +  type_attr: "T"
    +}
    +-}
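+
+-- The stated formula, rendered as a scalar function (hypothetical analogue,
+-- not part of the generated API):
+--
+-- ```prettyprint
+-- softplusGrad1 :: Floating a => a -> a -> a
+-- softplusGrad1 gradient feature = gradient / (1 + exp (negate feature))
+-- ```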
    +
    +-- | Computes the singular value decompositions of one or more matrices.
    +--
    +-- Computes the SVD of each inner matrix in `input` such that
    +-- `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
    +-- 
    +-- ```prettyprint
    +-- # a is a tensor containing a batch of matrices.
    +-- # s is a tensor of singular values for each matrix.
+-- # u is the tensor containing the left singular vectors for each matrix.
+-- # v is the tensor containing the right singular vectors for each matrix.
    +-- s, u, v = svd(a)
    +-- s, _, _ = svd(a, compute_uv=False)
    +-- ```
    +svd :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                            (Data.Complex.Complex Float),
    +                                            Double, Float] t) =>
    +       Tensor v1 t -- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
    +                   -- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
    +       -> (Tensor Value t, Tensor Value t, Tensor Value t)
    +       -- ^ (__s__, __u__, __v__)
    +       --
    +       -- * __s__: Singular values. Shape is `[..., P]`.
    +       --
+       -- * __u__: Left singular vectors. If `full_matrices` is `True` then shape is
+       -- `[..., M, M]`; if `full_matrices` is `False` then shape is
+       -- `[..., M, P]`. Undefined if `compute_uv` is `False`.
+       --
+       -- * __v__: Right singular vectors. If `full_matrices` is `True` then shape is
+       -- `[..., N, N]`. If `full_matrices` is `False` then shape is `[..., N, P]`.
+       -- Undefined if `compute_uv` is `False`.
    +svd input | eqLengthGuard [] =
    +    buildOp (opDef "Svd"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  default_value { b: true }
    +  description: "If true, left and right singular vectors will be\ncomputed and returned in `u` and `v`, respectively.\nIf false, `u` and `v` are not set and should never referenced."
    +  name: "compute_uv"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, compute full-sized `u` and `v`. If false\n(the default), compute only the leading `P` singular vectors.\nIgnored if `compute_uv` is `False`."
    +  name: "full_matrices"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_DOUBLE
    +      type: DT_FLOAT
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Singular values. Shape is `[..., P]`."
    +  name: "s"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., M, M]`; if `full_matrices` is `True` then shape is\n`[..., M, P]`. Undefined if `compute_uv` is `False`."
    +  name: "u"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., N, N]`. If `full_matrices` is `True` then shape is `[..., N, P]`.\nUndefined if `compute_uv` is false."
    +  name: "v"
    +  type_attr: "T"
    +}
    +-}
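+
+-- A pure shape-bookkeeping sketch (illustrative only, not part of the
+-- generated bindings): for a single [m, n] matrix, batch dimensions omitted,
+-- the shapes of s, u and v as a function of full_matrices.
+svdOutputShapes :: Int -> Int -> Bool -> ([Int], [Int], [Int])
+svdOutputShapes m n fullMatrices = (sShape, uShape, vShape)
+  where
+    p = min m n                                       -- P = min M N
+    sShape = [p]
+    uShape = if fullMatrices then [m, m] else [m, p]
+    vShape = if fullMatrices then [n, n] else [n, p]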
    +
    +-- | Convert one or more images from HSV to RGB.
    +--
    +-- Outputs a tensor of the same shape as the `images` tensor, containing the RGB
+-- value of the pixels. The output is only well defined if the values in `images`
    +-- are in `[0,1]`.
    +-- 
    +-- See `rgb_to_hsv` for a description of the HSV encoding.
    +hSVToRGB :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
    +            Tensor v1 t -- ^ __images__: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
    +            -> Tensor Value t -- ^ __output__: `images` converted to RGB.
    +hSVToRGB images | eqLengthGuard [] =
    +    buildOp (opDef "HSVToRGB"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        images
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "1-D or higher rank. HSV data to convert. Last dimension must be size 3."
    +  name: "images"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "`images` converted to RGB."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
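+
+-- A pure per-pixel reference (illustrative only, not the op's kernel) of the
+-- HSV-to-RGB conversion above, assuming h, s and v are all in [0, 1]:
+hsvPixelToRgb :: (Double, Double, Double) -> (Double, Double, Double)
+hsvPixelToRgb (h, s, v) = (r + m, g + m, b + m)
+  where
+    i = min 5 (floor (h * 6)) :: Int    -- sextant of the hue circle
+    f = h * 6 - fromIntegral i          -- position within the sextant
+    c = v * s                           -- chroma
+    x = if even i then c * f else c * (1 - f)
+    m = v - c
+    (r, g, b) = [ (c, x, 0), (x, c, 0), (0, c, x)
+                , (0, x, c), (x, 0, c), (c, 0, x) ] !! i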
    +
+-- | Outputs random values from a truncated normal distribution.
+--
+-- The parameters may each be a scalar which applies to the entire output, or a
+-- vector of length shape[0] which stores the parameters for each batch.
    +parameterizedTruncatedNormal :: forall v1 v2 v3 v4 v5 t dtype . (TensorType t,
    +                                                                 OneOf '[Data.Int.Int32,
    +                                                                         Data.Int.Int64] t,
    +                                                                 TensorType dtype,
    +                                                                 OneOf '[Data.Word.Word16,
    +                                                                         Double,
    +                                                                         Float] dtype) =>
    +                                Tensor v1 t -- ^ __shape__: The shape of the output tensor. Batches are indexed by the 0th dimension.
    +                                -> Tensor v2 dtype -- ^ __means__: The mean parameter of each batch.
    +                                -> Tensor v3 dtype -- ^ __stdevs__: The standard deviation parameter of each batch. Must be greater than 0.
    +                                -> Tensor v4 dtype -- ^ __minvals__: The minimum cutoff. May be -infinity.
    +                                -> Tensor v5 dtype -- ^ __maxvals__: The maximum cutoff. May be +infinity, and must be more than the minval
    +                                                   -- for each batch.
    +                                -> Tensor Value dtype -- ^ __output__: A matrix of shape num_batches x samples_per_batch, filled with random
    +                                -- truncated normal values using the parameters for each row.
    +parameterizedTruncatedNormal shape means stdevs minvals
    +                             maxvals | eqLengthGuard [] =
    +    buildOp (opDef "ParameterizedTruncatedNormal"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        shape means stdevs minvals maxvals
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  description: "The type of the output."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The shape of the output tensor. Batches are indexed by the 0th dimension."
    +  name: "shape"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The mean parameter of each batch."
    +  name: "means"
    +  type_attr: "dtype"
    +}
    +input_arg {
    +  description: "The standard deviation parameter of each batch. Must be greater than 0."
    +  name: "stdevs"
    +  type_attr: "dtype"
    +}
    +input_arg {
    +  description: "The minimum cutoff. May be -infinity."
    +  name: "minvals"
    +  type_attr: "dtype"
    +}
    +input_arg {
    +  description: "The maximum cutoff. May be +infinity, and must be more than the minval\nfor each batch."
    +  name: "maxvals"
    +  type_attr: "dtype"
    +}
    +output_arg {
    +  description: "A matrix of shape num_batches x samples_per_batch, filled with random\ntruncated normal values using the parameters for each row."
    +  name: "output"
    +  type_attr: "dtype"
    +}
    +-}
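+
+-- A tiny helper (illustrative only, not part of the bindings) spelling out
+-- the scalar-or-vector parameter rule above: a length-1 list acts as a
+-- scalar shared by every batch, otherwise entry b parameterizes batch b.
+paramForBatch :: [Double] -> Int -> Double
+paramForBatch [p] _ = p
+paramForBatch ps  b = ps !! b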
    +
    +-- | Computes square of x element-wise.
    +--
    +-- I.e., \\(y = x * x = x^2\\).
    +square :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                               (Data.Complex.Complex Float),
    +                                               Data.Int.Int32, Data.Int.Int64,
    +                                               Data.Word.Word16, Double,
    +                                               Float] t) =>
    +          Tensor v1 t -- ^ __x__
    +          -> Tensor Value t -- ^ __y__
    +square x | eqLengthGuard [] =
    +    buildOp (opDef "Square"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.
    +--
    +-- See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
    +-- ](http://arxiv.org/abs/1511.07289)
    +elu :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
    +                                            Data.Int.Int64, Data.Int.Int8,
    +                                            Data.Word.Word16, Data.Word.Word8,
    +                                            Double, Float] t) =>
    +       Tensor v1 t -- ^ __features__
    +       -> Tensor Value t -- ^ __activations__
    +elu features | eqLengthGuard [] =
    +    buildOp (opDef "Elu"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        features
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "features" type_attr: "T" }
    +output_arg { name: "activations" type_attr: "T" }
    +-}
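+
+-- A pure reference (illustrative only) of the ELU nonlinearity the op
+-- applies element-wise:
+eluRef :: Double -> Double
+eluRef x = if x < 0 then exp x - 1 else x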
    +
    +-- | Outputs all keys and values in the table.
    +
    +lookupTableExport :: forall v1 tkeys tvalues . (TensorType tkeys,
    +                                                TensorType tvalues) =>
    +                     Tensor v1 Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
    +                     -> (Tensor Value tkeys, Tensor Value tvalues)
    +                     -- ^ (__keys__, __values__)
    +                     --
    +                     -- * __keys__: Vector of all keys present in the table.
    +                     --
    +                     -- * __values__: Tensor of all values in the table. Indexed in parallel with `keys`.
    +lookupTableExport table_handle | eqLengthGuard [] =
    +    buildOp (opDef "LookupTableExport"
    +             & opAttr "Tkeys" .~ tensorType (undefined :: tkeys)
    +             & opAttr "Tvalues" .~ tensorType (undefined :: tvalues))
    +        table_handle
    +{-
    +attr { name: "Tkeys" type: "type" }
    +attr { name: "Tvalues" type: "type" }
    +input_arg {
    +  description: "Handle to the table."
    +  is_ref: true
    +  name: "table_handle"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "Vector of all keys present in the table."
    +  name: "keys"
    +  type_attr: "Tkeys"
    +}
    +output_arg {
    +  description: "Tensor of all values in the table. Indexed in parallel with `keys`."
    +  name: "values"
    +  type_attr: "Tvalues"
    +}
    +-}
    +
    +-- | Computes the number of elements in the given table.
    +
    +lookupTableSize :: Tensor v1 Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
+                   -> Tensor Value Data.Int.Int64 -- ^ __size__: Scalar that contains the number of elements in the table.
    +lookupTableSize table_handle | eqLengthGuard [] =
    +    buildOp (opDef "LookupTableSize")
    +        table_handle
    +{-
    +input_arg {
    +  description: "Handle to the table."
    +  is_ref: true
    +  name: "table_handle"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "Scalar that contains number of elements in the table."
    +  name: "size"
    +  type: DT_INT64
    +}
    +-}
    +
    +-- | Computes gradients of the average pooling function.
    +
    +avgPoolGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                                       Float] t) =>
    +               Tensor v1 Data.Int.Int32 -- ^ __orig_input_shape__: 1-D.  Shape of the original input to `avg_pool`.
    +               -> Tensor v2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
    +                              -- the output of `avg_pool`.
    +               -> Tensor Value t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `avg_pool`.
    +avgPoolGrad orig_input_shape grad | eqLengthGuard [] =
    +    buildOp (opDef "AvgPoolGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        orig_input_shape grad
    +{-
    +attr {
    +  description: "The size of the sliding window for each dimension of the input."
    +  has_minimum: true
    +  minimum: 4
    +  name: "ksize"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of the input."
    +  has_minimum: true
    +  minimum: 4
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values { list { s: "NHWC" s: "NCHW" } }
    +  default_value { s: "NHWC" }
    +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    +  name: "data_format"
    +  type: "string"
    +}
    +attr {
    +  allowed_values {
    +    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the original input to `avg_pool`."
    +  name: "orig_input_shape"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.\nthe output of `avg_pool`."
    +  name: "grad"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "4-D.  Gradients w.r.t. the input of `avg_pool`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the ids of the positions in sampled_candidates that match true_labels.
    +--
    +-- When doing log-odds NCE, the result of this op should be passed through a
    +-- SparseToDense op, then added to the logits of the sampled candidates. This has
    +-- the effect of 'removing' the sampled labels that match the true labels by
    +-- making the classifier sure that they are sampled labels.
    +computeAccidentalHits :: Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
    +                         -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: The true_classes output of UnpackSparseLabels.
    +                         -> Tensor v2 Data.Int.Int64 -- ^ __sampled_candidates__: The sampled_candidates output of CandidateSampler.
    +                         -> (Tensor Value Data.Int.Int32,
    +                             Tensor Value Data.Int.Int64, Tensor Value Float)
    +                         -- ^ (__indices__, __ids__, __weights__)
    +                         --
    +                         -- * __indices__: A vector of indices corresponding to rows of true_candidates.
    +                         --
    +                         -- * __ids__: A vector of IDs of positions in sampled_candidates that match a true_label
    +                         -- for the row with the corresponding index in indices.
    +                         --
    +                         -- * __weights__: A vector of the same length as indices and ids, in which each element
    +                         -- is -FLOAT_MAX.
    +computeAccidentalHits num_true true_classes
    +                      sampled_candidates | eqLengthGuard [] =
    +    buildOp (opDef "ComputeAccidentalHits"
    +             & opAttr "num_true" .~ num_true)
    +        true_classes sampled_candidates
    +{-
    +attr {
    +  description: "Number of true labels per context."
    +  name: "num_true"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "An second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +input_arg {
    +  description: "The true_classes output of UnpackSparseLabels."
    +  name: "true_classes"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "The sampled_candidates output of CandidateSampler."
    +  name: "sampled_candidates"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A vector of indices corresponding to rows of true_candidates."
    +  name: "indices"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "A vector of IDs of positions in sampled_candidates that match a true_label\nfor the row with the corresponding index in indices."
    +  name: "ids"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A vector of the same length as indices and ids, in which each element\nis -FLOAT_MAX."
    +  name: "weights"
    +  type: DT_FLOAT
    +}
    +-}
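+
+-- A pure reference (illustrative only, not the op's kernel) of the matching
+-- described above: every sampled candidate that also appears in a row of
+-- true labels yields (row index, position in sampled_candidates).
+accidentalHits :: [[Int]] -> [Int] -> [(Int, Int)]
+accidentalHits trueRows sampled =
+    [ (row, pos)
+    | (row, labels) <- zip [0 ..] trueRows
+    , (pos, cand)   <- zip [0 ..] sampled
+    , cand `elem` labels ]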
    +
    +-- | Calculates the CTC Loss (log probability) for each batch entry.  Also calculates
    +--
+-- the gradient.  This op performs the softmax operation for you, so inputs
+-- should be, e.g., linear projections of outputs of an LSTM.
    +cTCLoss :: Tensor v1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    +           -> Tensor v2 Data.Int.Int64 -- ^ __labels_indices__: The indices of a `SparseTensor<int32, 2>`.
    +                                       -- `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
    +                                       -- `(batch b, time t)`.
    +           -> Tensor v3 Data.Int.Int32 -- ^ __labels_values__: The values (labels) associated with the given batch and time.
    +           -> Tensor v4 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths (batch).
    +           -> (Tensor Value Float, Tensor Value Float)
    +           -- ^ (__loss__, __gradient__)
    +           --
    +           -- * __loss__: A vector (batch) containing log-probabilities.
    +           --
    +           -- * __gradient__: The gradient of `loss`.  3-D, shape:
    +           -- `(max_time x batch_size x num_classes)`.
    +cTCLoss inputs labels_indices labels_values sequence_length | eqLengthGuard [] =
    +    buildOp (opDef "CTCLoss")
    +        inputs labels_indices labels_values sequence_length
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "Scalar, if true then repeated labels are\ncollapsed prior to the CTC calculation."
    +  name: "preprocess_collapse_repeated"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: true }
    +  description: "Scalar.  If set to false, *during* CTC calculation\nrepeated non-blank labels will not be merged and are interpreted as\nindividual labels.  This is a simplified version of CTC."
    +  name: "ctc_merge_repeated"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
    +  name: "inputs"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "The indices of a `SparseTensor<int32, 2>`.\n`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for\n`(batch b, time t)`."
    +  name: "labels_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "The values (labels) associated with the given batch and time."
    +  name: "labels_values"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "A vector containing sequence lengths (batch)."
    +  name: "sequence_length"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "A vector (batch) containing log-probabilities."
    +  name: "loss"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "The gradient of `loss`.  3-D, shape:\n`(max_time x batch_size x num_classes)`."
    +  name: "gradient"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Performs 3D average pooling on the input.
    +
    +avgPool3D :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                                  (Data.Complex.Complex Float),
    +                                                  Data.Int.Int16,
    +                                                  Data.Int.Int32,
    +                                                  Data.Int.Int64, Data.Int.Int8,
    +                                                  Data.Word.Word16,
    +                                                  Data.Word.Word8, Double,
    +                                                  Float] t) =>
    +             Tensor v1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
    +             -> Tensor Value t -- ^ __output__: The average pooled output tensor.
    +avgPool3D input | eqLengthGuard [] =
    +    buildOp (opDef "AvgPool3D"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "ksize"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The average pooled output tensor."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the reciprocal of x element-wise.
    +--
    +-- I.e., \\(y = 1 / x\\).
    +inv :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                            (Data.Complex.Complex Float),
    +                                            Data.Int.Int32, Data.Int.Int64,
    +                                            Data.Word.Word16, Double,
    +                                            Float] t) => Tensor v1 t -- ^ __x__
    +       -> Tensor Value t -- ^ __y__
    +inv x | eqLengthGuard [] =
    +    buildOp (opDef "Inv"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Pop the element at the top of the stack.
    +
    +stackPop :: forall v1 elem_type . (TensorType elem_type) =>
    +            Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a stack.
    +            -> Tensor Value elem_type -- ^ __elem__: The tensor that is popped from the top of the stack.
    +stackPop handle | eqLengthGuard [] =
    +    buildOp (opDef "StackPop"
    +             & opAttr "elem_type" .~ tensorType (undefined :: elem_type))
    +        handle
    +{-
    +attr {
    +  description: "The type of the elem that is popped."
    +  name: "elem_type"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The handle to a stack."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "The tensor that is popped from the top of the stack."
    +  name: "elem"
    +  type_attr: "elem_type"
    +}
    +-}
    +
    +-- | A queue that produces elements in first-in first-out order.
    +--
+-- Variable-size shapes are allowed by setting the corresponding shape dimensions
+-- to -1 in the shape attr.  In this case DequeueMany will pad up to the maximum
    +-- size of any given element in the minibatch.  See below for details.
    +paddingFIFOQueue :: Tensor Value Data.ByteString.ByteString -- ^ __handle__: The handle to the queue.
    +paddingFIFOQueue  | eqLengthGuard [] =
    +    buildOp (opDef "PaddingFIFOQueue")
    +        
    +{-
    +attr {
    +  description: "The type of each component in a value."
    +  has_minimum: true
    +  minimum: 1
    +  name: "component_types"
    +  type: "list(type)"
    +}
    +attr {
    +  default_value { list { } }
    +  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1.  In this case, the inputs\' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time."
    +  has_minimum: true
    +  name: "shapes"
    +  type: "list(shape)"
    +}
    +attr {
    +  default_value { i: -1 }
    +  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
    +  name: "capacity"
    +  type: "int"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
    +  name: "shared_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "The handle to the queue."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +-}
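+
+-- A pure sketch (illustrative only, not the queue implementation) of the
+-- DequeueMany padding rule above for 1-D elements: each element is padded
+-- with zeros up to the longest element in the minibatch (assumes a
+-- non-empty batch).
+padMinibatch :: [[Double]] -> [[Double]]
+padMinibatch batch = map padOne batch
+  where
+    widest = maximum (map length batch)
+    padOne e = e ++ replicate (widest - length e) 0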
    +
    +-- | 
    +
    +batchSelfAdjointEigV2 :: forall v1 t . (TensorType t, OneOf '[Double,
    +                                                              Float] t) =>
    +                         Tensor v1 t -- ^ __input__
    +                         -> (Tensor Value t, Tensor Value t)
    +                         -- ^ (__e__, __v__)
    +                         --
    +                         -- * __e__
    +                         --
    +                         -- * __v__
    +batchSelfAdjointEigV2 input | eqLengthGuard [] =
    +    buildOp (opDef "BatchSelfAdjointEigV2"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { default_value { b: true } name: "compute_v" type: "bool" }
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "e" type_attr: "T" }
    +output_arg { name: "v" type_attr: "T" }
    +-}
    +
    +-- | 
    +
    +batchMatrixTriangularSolve :: forall v1 v2 t . (TensorType t, OneOf '[Double,
    +                                                                      Float] t) =>
    +                              Tensor v1 t -- ^ __matrix__
    +                              -> Tensor v2 t -- ^ __rhs__
    +                              -> Tensor Value t -- ^ __output__
    +batchMatrixTriangularSolve matrix rhs | eqLengthGuard [] =
    +    buildOp (opDef "BatchMatrixTriangularSolve"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        matrix rhs
    +{-
    +attr { default_value { b: true } name: "lower" type: "bool" }
    +attr { default_value { b: false } name: "adjoint" type: "bool" }
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "matrix" type_attr: "T" }
    +input_arg { name: "rhs" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | 
    +
    +batchMatrixSolveLs :: forall v1 v2 v3 t . (TensorType t, OneOf '[Double,
    +                                                                 Float] t) =>
    +                      Tensor v1 t -- ^ __matrix__
    +                      -> Tensor v2 t -- ^ __rhs__
    +                      -> Tensor v3 Double -- ^ __l2_regularizer__
    +                      -> Tensor Value t -- ^ __output__
    +batchMatrixSolveLs matrix rhs l2_regularizer | eqLengthGuard [] =
    +    buildOp (opDef "BatchMatrixSolveLs"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        matrix rhs l2_regularizer
    +{-
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +attr { default_value { b: true } name: "fast" type: "bool" }
    +input_arg { name: "matrix" type_attr: "T" }
    +input_arg { name: "rhs" type_attr: "T" }
    +input_arg { name: "l2_regularizer" type: DT_DOUBLE }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | 
    +
    +batchSvd :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                                 (Data.Complex.Complex Float),
    +                                                 Double, Float] t) =>
    +            Tensor v1 t -- ^ __input__
    +            -> (Tensor Value t, Tensor Value t, Tensor Value t)
    +            -- ^ (__s__, __u__, __v__)
    +            --
    +            -- * __s__
    +            --
    +            -- * __u__
    +            --
    +            -- * __v__
    +batchSvd input | eqLengthGuard [] =
    +    buildOp (opDef "BatchSvd"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { default_value { b: true } name: "compute_uv" type: "bool" }
    +attr {
    +  default_value { b: false } name: "full_matrices" type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_DOUBLE
    +      type: DT_FLOAT
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "s" type_attr: "T" }
    +output_arg { name: "u" type_attr: "T" }
    +output_arg { name: "v" type_attr: "T" }
    +-}
    +
    +-- | Outputs a `Summary` protocol buffer with a tensor.
    +
    +tensorSummary :: forall v1 t . (TensorType t) =>
    +                 Tensor v1 t -- ^ __tensor__: A tensor to serialize.
    +                 -> Tensor Value Data.ByteString.ByteString -- ^ __summary__
    +tensorSummary tensor | eqLengthGuard [] =
    +    buildOp (opDef "TensorSummary"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        tensor
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "A name to associate with the data series."
    +  name: "display_name"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "An optional long description of the data being output."
    +  name: "description"
    +  type: "string"
    +}
    +attr {
    +  default_value { list { } }
    +  description: "a list of strings used to specify how the data can be interpreted, e.g.\na string tensor containing jpg images should have \'encoding:image/jpg\'; a\nstring tensor with foo protos should have \'encoding:proto/X/Y/foo.proto\';\na numeric tensor containing bounding boxes may have\n\'bounding_box:x1,y1,x2,y2,\'. If the tensor is a part of a group of related\noutputs, that can be encoded through a \'group:$groupName/$roleInGroup\' label.\nLabels may be formatted as \'prefix:value\'. The prefix may be re-used."
    +  name: "labels"
    +  type: "list(string)"
    +}
    +input_arg {
    +  description: "A tensor to serialize." name: "tensor" type_attr: "T"
    +}
    +output_arg { name: "summary" type: DT_STRING }
    +-}
    +
    +-- | Computes softmax cross entropy cost and gradients to backpropagate.
    +--
    +-- Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
    +-- a matrix of label probabilities, but rather a single label per row
    +-- of features.  This label is considered to have probability 1.0 for the
    +-- given row.
    +-- 
    +-- Inputs are the logits, not probabilities.
    +sparseSoftmaxCrossEntropyWithLogits :: forall v1 v2 t tlabels . (TensorType t,
    +                                                                 OneOf '[Data.Word.Word16,
    +                                                                         Double,
    +                                                                         Float] t,
    +                                                                 TensorType tlabels,
    +                                                                 OneOf '[Data.Int.Int32,
    +                                                                         Data.Int.Int64] tlabels) =>
    +                                       Tensor v1 t -- ^ __features__: batch_size x num_classes matrix
    +                                       -> Tensor v2 tlabels -- ^ __labels__: batch_size vector with values in [0, num_classes).
    +                                                            -- This is the label for the given minibatch entry.
    +                                       -> (Tensor Value t, Tensor Value t)
    +                                       -- ^ (__loss__, __backprop__)
    +                                       --
    +                                       -- * __loss__: Per example loss (batch_size vector).
    +                                       --
    +                                       -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix).
    +sparseSoftmaxCrossEntropyWithLogits features labels | eqLengthGuard [] =
    +    buildOp (opDef "SparseSoftmaxCrossEntropyWithLogits"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tlabels" .~ tensorType (undefined :: tlabels))
    +        features labels
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT64 }
    +  name: "Tlabels"
    +  type: "type"
    +}
    +input_arg {
    +  description: "batch_size x num_classes matrix"
    +  name: "features"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "batch_size vector with values in [0, num_classes).\nThis is the label for the given minibatch entry."
    +  name: "labels"
    +  type_attr: "Tlabels"
    +}
    +output_arg {
    +  description: "Per example loss (batch_size vector)."
    +  name: "loss"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "backpropagated gradients (batch_size x num_classes matrix)."
    +  name: "backprop"
    +  type_attr: "T"
    +}
    +-}
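+
+-- A numerically naive pure reference (illustrative only; real kernels
+-- subtract the max logit first) for a single row: the loss is the
+-- log-sum-exp of the logits minus the logit of the true label.
+sparseXentRow :: [Double] -> Int -> Double
+sparseXentRow logits label = log (sum (map exp logits)) - logits !! label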
    +
    +-- | Performs max pooling on the input and outputs both max values and indices.
    +--
    +-- The indices in `argmax` are flattened, so that a maximum value at position
    +-- `[b, y, x, c]` becomes flattened index
    +-- `((b * height + y) * width + x) * channels + c`.
    +maxPoolWithArgmax :: forall v1 t targmax . (TensorType t,
    +                                            OneOf '[Data.Word.Word16, Float] t,
    +                                            TensorType targmax,
    +                                            OneOf '[Data.Int.Int32,
    +                                                    Data.Int.Int64] targmax) =>
    +                     Tensor v1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`.  Input to pool over.
    +                     -> (Tensor Value t, Tensor Value targmax)
    +                     -- ^ (__output__, __argmax__)
    +                     --
    +                     -- * __output__: The max pooled output tensor.
    +                     --
    +                     -- * __argmax__: 4-D.  The flattened indices of the max values chosen for each output.
    +maxPoolWithArgmax input | eqLengthGuard [] =
    +    buildOp (opDef "MaxPoolWithArgmax"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Targmax" .~ tensorType (undefined :: targmax))
    +        input
    +{-
    +attr {
    +  description: "The size of the window for each dimension of the input tensor."
    +  has_minimum: true
    +  minimum: 4
    +  name: "ksize"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of the\ninput tensor."
    +  has_minimum: true
    +  minimum: 4
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT64 }
    +  name: "Targmax"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`.  Input to pool over."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The max pooled output tensor."
    +  name: "output"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "4-D.  The flattened indices of the max values chosen for each output."
    +  name: "argmax"
    +  type_attr: "Targmax"
    +}
    +-}
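+
+-- The flattening rule quoted above as a pure helper (illustrative only):
+flattenArgmax :: Int -> Int -> Int -> (Int, Int, Int, Int) -> Int
+flattenArgmax height width channels (b, y, x, c) =
+    ((b * height + y) * width + x) * channels + c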
    +
    +-- | Compute the 1-dimensional discrete Fourier Transform over the inner-most
    +--
    +-- dimension of `input`.
    +fFT :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
    +       -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most
    +       -- dimension of `input` is replaced with its 1D Fourier Transform.
    +fFT input | eqLengthGuard [] =
    +    buildOp (opDef "FFT")
    +        input
    +{-
    +input_arg {
    +  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
    +}
    +output_arg {
    +  description: "A complex64 tensor of the same shape as `input`. The inner-most\ndimension of `input` is replaced with its 1D Fourier Transform."
    +  name: "output"
    +  type: DT_COMPLEX64
    +}
    +-}
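+
+-- A naive O(n^2) pure reference DFT (illustrative only, not the op's kernel)
+-- for the transform computed along the inner-most dimension.  It assumes the
+-- qualified Data.Complex import already visible in the signatures above is a
+-- plain `import qualified Data.Complex`.
+dftRef :: [Data.Complex.Complex Float] -> [Data.Complex.Complex Float]
+dftRef xs =
+    [ sum [ x * Data.Complex.cis (-2 * pi * fromIntegral (j * k) / n)
+          | (j, x) <- zip [0 ..] xs ]
+    | k <- [0 .. length xs - 1] ]
+  where
+    n = fromIntegral (length xs)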
    +
    +-- | Outputs a `Summary` protocol buffer with a histogram.
    +--
    +-- The generated
    +-- [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
    +-- has one summary value containing a histogram for `values`.
    +-- 
    +-- This op reports an `InvalidArgument` error if any value is not finite.
    +histogramSummary :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                            Data.Int.Int32,
    +                                                            Data.Int.Int64,
    +                                                            Data.Int.Int8,
    +                                                            Data.Word.Word16,
    +                                                            Data.Word.Word8,
    +                                                            Double, Float] t) =>
    +                    Tensor v1 Data.ByteString.ByteString -- ^ __tag__: Scalar.  Tag to use for the `Summary.Value`.
    +                    -> Tensor v2 t -- ^ __values__: Any shape. Values to use to build the histogram.
    +                    -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
    +histogramSummary tag values | eqLengthGuard [] =
    +    buildOp (opDef "HistogramSummary"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        tag values
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Scalar.  Tag to use for the `Summary.Value`."
    +  name: "tag"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Any shape. Values to use to build the histogram."
    +  name: "values"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Scalar. Serialized `Summary` protocol buffer."
    +  name: "summary"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Pads a tensor with zeros.
    +--
+-- This operation pads `input` with zeros according to the `paddings` you
    +-- specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
    +-- rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
    +-- how many zeros to add before the contents of `input` in that dimension, and
    +-- `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
    +-- in that dimension.
    +-- 
    +-- The padded size of each dimension D of the output is:
    +-- 
    +-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 't' is [[1, 1], [2, 2]]
    +-- # 'paddings' is [[1, 1], [2, 2]]
    +-- # rank of 't' is 2
    +-- pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
    +--                       [0, 0, 1, 1, 0, 0]
    +--                       [0, 0, 2, 2, 0, 0]
    +--                       [0, 0, 0, 0, 0, 0]]
    +-- ```
    +pad :: forall v1 v2 t tpaddings . (TensorType t, TensorType tpaddings,
    +                                   OneOf '[Data.Int.Int32,
    +                                           Data.Int.Int64] tpaddings) =>
    +       Tensor v1 t -- ^ __input__
    +       -> Tensor v2 tpaddings -- ^ __paddings__
    +       -> Tensor Value t -- ^ __output__
    +pad input paddings | eqLengthGuard [] =
    +    buildOp (opDef "Pad"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings))
    +        input paddings
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tpaddings"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +input_arg { name: "paddings" type_attr: "Tpaddings" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
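+
+-- The padded-size rule above as a pure helper (illustrative only): output
+-- dimension D has size paddings(D, 0) + input.dim_size(D) + paddings(D, 1).
+paddedShape :: [Int] -> [(Int, Int)] -> [Int]
+paddedShape dims paddings =
+    [ before + d + after | (d, (before, after)) <- zip dims paddings ]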
    +
    +-- | 
    +
    +batchIFFT3D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
    +               -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
    +batchIFFT3D input | eqLengthGuard [] =
    +    buildOp (opDef "BatchIFFT3D")
    +        input
    +{-
    +input_arg { name: "input" type: DT_COMPLEX64 }
    +output_arg { name: "output" type: DT_COMPLEX64 }
    +-}
    +
    +-- | Outputs a `Summary` protocol buffer with images.
    +--
    +-- The summary has up to `max_images` summary values containing images. The
    +-- images are built from `tensor` which must be 4-D with shape `[batch_size,
    +-- height, width, channels]` and where `channels` can be:
    +-- 
    +-- *  1: `tensor` is interpreted as Grayscale.
    +-- *  3: `tensor` is interpreted as RGB.
    +-- *  4: `tensor` is interpreted as RGBA.
    +-- 
    +-- The images have the same number of channels as the input tensor. For float
    +-- input, the values are normalized one image at a time to fit in the range
    +-- `[0, 255]`.  `uint8` values are unchanged.  The op uses two different
    +-- normalization algorithms:
    +-- 
    +-- *  If the input values are all positive, they are rescaled so the largest one
    +--    is 255.
    +-- 
    +-- *  If any input value is negative, the values are shifted so input value 0.0
    +--    is at 127.  They are then rescaled so that either the smallest value is 0,
    +--    or the largest one is 255.
    +-- 
    +-- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
    +-- build the `tag` of the summary values:
    +-- 
    +-- *  If `max_images` is 1, the summary value tag is '*tag*/image'.
    +-- *  If `max_images` is greater than 1, the summary value tags are
    +--    generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
    +-- 
    +-- The `bad_color` argument is the color to use in the generated images for
+-- non-finite input values.  It is a `uint8` 1-D tensor of length `channels`.
    +-- Each element must be in the range `[0, 255]` (It represents the value of a
    +-- pixel in the output image).  Non-finite values in the input tensor are
    +-- replaced by this tensor in the output image.  The default value is the color
    +-- red.
    +imageSummary :: forall v1 v2 t . (TensorType t, OneOf '[Data.Word.Word16,
    +                                                        Data.Word.Word8,
    +                                                        Float] t) =>
    +                Tensor v1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
    +                -> Tensor v2 t -- ^ __tensor__: 4-D of shape `[batch_size, height, width, channels]` where
    +                               -- `channels` is 1, 3, or 4.
    +                -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
    +imageSummary tag tensor | eqLengthGuard [] =
    +    buildOp (opDef "ImageSummary"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        tag tensor
    +{-
    +attr {
    +  default_value { i: 3 }
    +  description: "Max number of batch elements to generate images for."
    +  has_minimum: true
    +  minimum: 1
    +  name: "max_images"
    +  type: "int"
    +}
    +attr {
    +  allowed_values {
    +    list { type: DT_UINT8 type: DT_FLOAT type: DT_HALF }
    +  }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value {
    +    tensor {
    +      dtype: DT_UINT8
    +      int_val: 255
    +      int_val: 0
    +      int_val: 0
    +      int_val: 255
    +      tensor_shape { dim { size: 4 } }
    +    }
    +  }
    +  description: "Color to use for pixels with non-finite values."
    +  name: "bad_color"
    +  type: "tensor"
    +}
    +input_arg {
    +  description: "Scalar. Used to build the `tag` attribute of the summary values."
    +  name: "tag"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "4-D of shape `[batch_size, height, width, channels]` where\n`channels` is 1, 3, or 4."
    +  name: "tensor"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Scalar. Serialized `Summary` protocol buffer."
    +  name: "summary"
    +  type: DT_STRING
    +}
    +-}
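+
+-- One plausible pure reading (illustrative only, not the op's kernel) of the
+-- float normalization rule above for a single image.  It assumes a non-empty
+-- image with a positive maximum in the all-non-negative case.
+normalizeImage :: [Double] -> [Double]
+normalizeImage vals
+    | lo >= 0   = map (* (255 / hi)) vals       -- largest value maps to 255
+    | otherwise = map (\v -> 127 + v * s) vals  -- input 0.0 lands at 127
+  where
+    lo = minimum vals
+    hi = maximum vals
+    s | hi > 0    = min (127 / negate lo) (128 / hi)
+      | otherwise = 127 / negate lo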
    +
    +-- | Computes the sum along segments of a tensor.
    +--
    +-- Read [the section on Segmentation](../../api_docs/python/math_ops.md#segmentation)
    +-- for an explanation of segments.
    +-- 
    +-- Computes a tensor such that
    +-- \\(output_i = \sum_j data_j\\) where sum is over `j` such
    +-- that `segment_ids[j] == i`.
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/SegmentSum.png" alt>
    +-- </div>
    +segmentSum :: forall v1 v2 t tindices . (TensorType t,
    +                                         OneOf '[(Data.Complex.Complex Double),
    +                                                 (Data.Complex.Complex Float),
    +                                                 Data.Int.Int16, Data.Int.Int32,
    +                                                 Data.Int.Int64, Data.Int.Int8,
    +                                                 Data.Word.Word16,
    +                                                 Data.Word.Word8, Double,
    +                                                 Float] t, TensorType tindices,
    +                                         OneOf '[Data.Int.Int32,
    +                                                 Data.Int.Int64] tindices) =>
    +              Tensor v1 t -- ^ __data__
+              -> Tensor v2 tindices -- ^ __segment_ids__: A 1-D tensor whose size is equal to the size of `data`'s
+                                    -- first dimension.  Values should be sorted and can be repeated.
    +              -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
    +              -- has size `k`, the number of segments.
    +segmentSum data' segment_ids | eqLengthGuard [] =
    +    buildOp (opDef "SegmentSum"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        data' segment_ids
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +input_arg { name: "data" type_attr: "T" }
    +input_arg {
    +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
    +  name: "segment_ids"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
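+
+-- A pure reference (illustrative only) of the segment sum above for 1-D
+-- data with non-empty, sorted segment ids drawn from 0..k-1:
+segmentSumRef :: [Double] -> [Int] -> [Double]
+segmentSumRef xs ids =
+    [ sum [ x | (x, i) <- zip xs ids, i == seg ]
+    | seg <- [0 .. maximum ids] ]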
    +
    +-- | JPEG-encode an image.
    +--
    +-- `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
    +-- 
    +-- The attr `format` can be used to override the color format of the encoded
    +-- output.  Values can be:
    +-- 
    +-- *   `''`: Use a default format based on the number of channels in the image.
    +-- *   `grayscale`: Output a grayscale JPEG image.  The `channels` dimension
    +--     of `image` must be 1.
    +-- *   `rgb`: Output an RGB JPEG image. The `channels` dimension
    +--     of `image` must be 3.
    +-- 
    +-- If `format` is not specified or is the empty string, a default format is picked
+-- based on the number of channels in `image`:
    +-- 
    +-- *   1: Output a grayscale image.
    +-- *   3: Output an RGB image.
    +encodeJpeg :: Tensor v1 Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`.
    +              -> Tensor Value Data.ByteString.ByteString -- ^ __contents__: 0-D. JPEG-encoded image.
    +encodeJpeg image | eqLengthGuard [] =
    +    buildOp (opDef "EncodeJpeg")
    +        image
    +{-
    +attr {
    +  allowed_values { list { s: "" s: "grayscale" s: "rgb" } }
    +  default_value { s: "" }
    +  description: "Per pixel image format."
    +  name: "format"
    +  type: "string"
    +}
    +attr {
    +  default_value { i: 95 }
    +  description: "Quality of the compression from 0 to 100 (higher is better and slower)."
    +  name: "quality"
    +  type: "int"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, create a JPEG that loads progressively (coarse to fine)."
    +  name: "progressive"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, spend CPU/RAM to reduce size with no quality change."
    +  name: "optimize_size"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: true }
    +  description: "See http://en.wikipedia.org/wiki/Chroma_subsampling."
    +  name: "chroma_downsampling"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values { list { s: "in" s: "cm" } }
    +  default_value { s: "in" }
    +  description: "Unit used to specify `x_density` and `y_density`:\npixels per inch (`\'in\'`) or centimeter (`\'cm\'`)."
    +  name: "density_unit"
    +  type: "string"
    +}
    +attr {
    +  default_value { i: 300 }
    +  description: "Horizontal pixels per density unit."
    +  name: "x_density"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 300 }
    +  description: "Vertical pixels per density unit."
    +  name: "y_density"
    +  type: "int"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If not empty, embed this XMP metadata in the image header."
    +  name: "xmp_metadata"
    +  type: "string"
    +}
    +input_arg {
    +  description: "3-D with shape `[height, width, channels]`."
    +  name: "image"
    +  type: DT_UINT8
    +}
    +output_arg {
    +  description: "0-D. JPEG-encoded image."
    +  name: "contents"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Gradients for batch normalization.
    +--
    +-- This op is deprecated. See `tf.nn.batch_normalization`.
    +batchNormWithGlobalNormalizationGrad :: forall v1 v2 v3 v4 v5 t . (TensorType t,
    +                                                                   OneOf '[(Data.Complex.Complex Double),
    +                                                                           (Data.Complex.Complex Float),
    +                                                                           Data.Int.Int16,
    +                                                                           Data.Int.Int32,
    +                                                                           Data.Int.Int64,
    +                                                                           Data.Int.Int8,
    +                                                                           Data.Word.Word16,
    +                                                                           Data.Word.Word8,
    +                                                                           Double,
    +                                                                           Float] t) =>
+                                        Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulting tensor
    +                                             -- needs to be multiplied with gamma.
    +                                        -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
    +                                        -> Tensor v1 t -- ^ __t__: A 4D input Tensor.
    +                                        -> Tensor v2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
    +                                                       -- This is the first output from tf.nn.moments,
    +                                                       -- or a saved moving average thereof.
    +                                        -> Tensor v3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
    +                                                       -- This is the second output from tf.nn.moments,
    +                                                       -- or a saved moving average thereof.
    +                                        -> Tensor v4 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
    +                                                       -- If "scale_after_normalization" is true, this Tensor will be multiplied
    +                                                       -- with the normalized Tensor.
    +                                        -> Tensor v5 t -- ^ __backprop__: 4D backprop Tensor.
    +                                        -> (Tensor Value t, Tensor Value t,
    +                                            Tensor Value t, Tensor Value t,
    +                                            Tensor Value t)
    +                                        -- ^ (__dx__, __dm__, __dv__, __db__, __dg__)
    +                                        --
    +                                        -- * __dx__: 4D backprop tensor for input.
    +                                        --
    +                                        -- * __dm__: 1D backprop tensor for mean.
    +                                        --
    +                                        -- * __dv__: 1D backprop tensor for variance.
    +                                        --
    +                                        -- * __db__: 1D backprop tensor for beta.
    +                                        --
    +                                        -- * __dg__: 1D backprop tensor for gamma.
    +batchNormWithGlobalNormalizationGrad scale_after_normalization variance_epsilon
    +                                     t m v gamma backprop | eqLengthGuard [] =
    +    buildOp (opDef "BatchNormWithGlobalNormalizationGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "scale_after_normalization" .~ scale_after_normalization
    +             & opAttr "variance_epsilon" .~ variance_epsilon)
    +        t m v gamma backprop
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "A small float number to avoid dividing by 0."
    +  name: "variance_epsilon"
    +  type: "float"
    +}
    +attr {
    +  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
    +  name: "scale_after_normalization"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "A 4D input Tensor." name: "t" type_attr: "T"
    +}
    +input_arg {
    +  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
    +  name: "m"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
    +  name: "v"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this Tensor will be multiplied\nwith the normalized Tensor."
    +  name: "gamma"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "4D backprop Tensor." name: "backprop" type_attr: "T"
    +}
    +output_arg {
    +  description: "4D backprop tensor for input."
    +  name: "dx"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1D backprop tensor for mean."
    +  name: "dm"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1D backprop tensor for variance."
    +  name: "dv"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1D backprop tensor for beta."
    +  name: "db"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1D backprop tensor for gamma."
    +  name: "dg"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Adds `bias` to `value`.
    +--
    +-- This is a deprecated version of BiasAdd and will soon be removed.
    +-- 
    +-- This is a special case of `tf.add` where `bias` is restricted to be 1-D.
    +-- Broadcasting is supported, so `value` may have any number of dimensions.
    +biasAddV1 :: forall v1 v2 t . (TensorType t,
    +                               OneOf '[(Data.Complex.Complex Double),
    +                                       (Data.Complex.Complex Float),
    +                                       Data.Int.Int16, Data.Int.Int32,
    +                                       Data.Int.Int64, Data.Int.Int8,
    +                                       Data.Word.Word16, Data.Word.Word8,
    +                                       Double, Float] t) =>
    +             Tensor v1 t -- ^ __value__: Any number of dimensions.
    +             -> Tensor v2 t -- ^ __bias__: 1-D with size the last dimension of `value`.
    +             -> Tensor Value t -- ^ __output__: Broadcasted sum of `value` and `bias`.
    +biasAddV1 value bias | eqLengthGuard [] =
    +    buildOp (opDef "BiasAddV1"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        value bias
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Any number of dimensions."
    +  name: "value"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D with size the last dimension of `value`."
    +  name: "bias"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Broadcasted sum of `value` and `bias`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
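    +
    +-- A minimal usage sketch, assuming `constant` from the companion
    +-- tensorflow-ops package, `run`/`runSession` from TensorFlow.Session, and
    +-- fetching into a Data.Vector:
    +{-
    +import qualified Data.Vector as V
    +import TensorFlow.GenOps.Core (biasAddV1)
    +import TensorFlow.Ops (constant)
    +import TensorFlow.Session (run, runSession)
    +import TensorFlow.Types (Shape (..))
    +
    +main :: IO ()
    +main = do
    +    -- A 2x3 `value` plus a length-3 `bias`, broadcast across the rows:
    +    -- expected row-major output [11, 22, 33, 14, 25, 36].
    +    out <- runSession $ run $
    +        biasAddV1 (constant (Shape [2, 3]) [1 .. 6 :: Float])
    +                  (constant (Shape [3]) [10, 20, 30])
    +    print (out :: V.Vector Float)
    +-}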
    +
    +-- | Computes the inverse permutation of a tensor.
    +--
    +-- This operation computes the inverse of an index permutation. It takes a 1-D
    +-- integer tensor `x`, which represents the indices of a zero-based array, and
    +-- swaps each value with its index position. In other words, for an output tensor
    +-- `y` and an input tensor `x`, this operation computes the following:
    +-- 
    +-- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
    +-- 
    +-- The values must include 0. There can be no duplicate values or negative values.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # tensor `x` is [3, 4, 0, 2, 1]
    +-- invert_permutation(x) ==> [2, 4, 3, 0, 1]
    +-- ```
    +invertPermutation :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int32,
    +                                                          Data.Int.Int64] t) =>
    +                     Tensor v1 t -- ^ __x__: 1-D.
    +                     -> Tensor Value t -- ^ __y__: 1-D.
    +invertPermutation x | eqLengthGuard [] =
    +    buildOp (opDef "InvertPermutation"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { description: "1-D." name: "x" type_attr: "T" }
    +output_arg { description: "1-D." name: "y" type_attr: "T" }
    +-}
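    +
    +-- A minimal sketch mirroring the example above, under the same assumptions
    +-- (`constant` from the companion tensorflow-ops package, `run`/`runSession`
    +-- from TensorFlow.Session, Vector fetch):
    +{-
    +import Data.Int (Int32)
    +import qualified Data.Vector as V
    +import TensorFlow.GenOps.Core (invertPermutation)
    +import TensorFlow.Ops (constant)
    +import TensorFlow.Session (run, runSession)
    +import TensorFlow.Types (Shape (..))
    +
    +main :: IO ()
    +main = do
    +    -- x is [3, 4, 0, 2, 1]; expect [2, 4, 3, 0, 1].
    +    y <- runSession $ run $
    +        invertPermutation (constant (Shape [5]) [3, 4, 0, 2, 1 :: Int32])
    +    print (y :: V.Vector Int32)
    +-}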
    +
    +-- | Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
    +--
    +-- This operation folds the padded areas of `input` by `MirrorPad` according to the
    +-- `paddings` you specify. `paddings` must be the same as `paddings` argument
    +-- given to the corresponding `MirrorPad` op.
    +-- 
    +-- The folded size of each dimension D of the output is:
    +-- 
    +-- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
    +-- # 'paddings' is [[0, 1], [0, 1]].
    +-- # 'mode' is SYMMETRIC.
    +-- # rank of 't' is 2.
    +-- pad(t, paddings) ==> [[ 1,  5],
    +--                       [11, 28]]
    +-- ```
    +mirrorPadGrad :: forall v1 v2 t tpaddings . (TensorType t, TensorType tpaddings,
    +                                             OneOf '[Data.Int.Int32,
    +                                                     Data.Int.Int64] tpaddings) =>
    +                 Tensor v1 t -- ^ __input__: The input tensor to be folded.
    +                 -> Tensor v2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
    +                                        -- rows must be the same as the rank of `input`.
    +                 -> Tensor Value t -- ^ __output__: The folded tensor.
    +mirrorPadGrad input paddings | eqLengthGuard [] =
    +    buildOp (opDef "MirrorPadGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings))
    +        input paddings
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tpaddings"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
    +  description: "The mode used in the `MirrorPad` op."
    +  name: "mode"
    +  type: "string"
    +}
    +input_arg {
    +  description: "The input tensor to be folded."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
    +  name: "paddings"
    +  type_attr: "Tpaddings"
    +}
    +output_arg {
    +  description: "The folded tensor." name: "output" type_attr: "T"
    +}
    +-}
    +
    +-- | Reverses specific dimensions of a tensor.
    +--
    +-- Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
    +-- of `tensor`, this operation reverses each dimension i of `tensor` where
    +-- `dims[i]` is `True`.
    +-- 
    +-- `tensor` can have up to 8 dimensions. The number of dimensions
    +-- of `tensor` must equal the number of elements in `dims`. In other words:
    +-- 
    +-- `rank(tensor) = size(dims)`
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # tensor 't' is [[[[ 0,  1,  2,  3],
    +-- #                  [ 4,  5,  6,  7],
    +-- #                  [ 8,  9, 10, 11]],
    +-- #                 [[12, 13, 14, 15],
    +-- #                  [16, 17, 18, 19],
    +-- #                  [20, 21, 22, 23]]]]
    +-- # tensor 't' shape is [1, 2, 3, 4]
    +-- 
    +-- # 'dims' is [False, False, False, True]
    +-- reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
    +--                         [ 7,  6,  5,  4],
    +--                         [11, 10,  9,  8]],
    +--                        [[15, 14, 13, 12],
    +--                         [19, 18, 17, 16],
    +--                         [23, 22, 21, 20]]]]
    +-- 
    +-- # 'dims' is [False, True, False, False]
    +-- reverse(t, dims) ==> [[[[12, 13, 14, 15],
    +--                         [16, 17, 18, 19],
    +--                         [20, 21, 22, 23]],
    +--                        [[ 0,  1,  2,  3],
    +--                         [ 4,  5,  6,  7],
    +--                         [ 8,  9, 10, 11]]]]
    +-- 
    +-- # 'dims' is [False, False, True, False]
    +-- reverse(t, dims) ==> [[[[8, 9, 10, 11],
    +--                         [4, 5, 6, 7],
    +--                         [0, 1, 2, 3]],
    +--                        [[20, 21, 22, 23],
    +--                         [16, 17, 18, 19],
    +--                         [12, 13, 14, 15]]]]
    +-- ```
    +reverse :: forall v1 v2 t . (TensorType t,
    +                             OneOf '[(Data.Complex.Complex Double),
    +                                     (Data.Complex.Complex Float), Bool,
    +                                     Data.Int.Int32, Data.Int.Int64,
    +                                     Data.Int.Int8, Data.Word.Word16,
    +                                     Data.Word.Word8, Double, Float] t) =>
    +           Tensor v1 t -- ^ __tensor__: Up to 8-D.
    +           -> Tensor v2 Bool -- ^ __dims__: 1-D. The dimensions to reverse.
    +           -> Tensor Value t -- ^ __output__: The same shape as `tensor`.
    +reverse tensor dims | eqLengthGuard [] =
    +    buildOp (opDef "Reverse"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        tensor dims
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_BOOL
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Up to 8-D." name: "tensor" type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D. The dimensions to reverse."
    +  name: "dims"
    +  type: DT_BOOL
    +}
    +output_arg {
    +  description: "The same shape as `tensor`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
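    +
    +-- A minimal sketch under the same assumptions as the earlier ones
    +-- (`constant`, `run`/`runSession`, Vector fetch). Note that `reverse`
    +-- clashes with Prelude.reverse, so the generated module is imported
    +-- qualified here:
    +{-
    +import qualified Data.Vector as V
    +import qualified TensorFlow.GenOps.Core as CoreOps
    +import TensorFlow.Ops (constant)
    +import TensorFlow.Session (run, runSession)
    +import TensorFlow.Types (Shape (..))
    +
    +main :: IO ()
    +main = do
    +    -- Reverse only the last dimension of a 2x3 tensor:
    +    -- [[1, 2, 3], [4, 5, 6]] ==> [[3, 2, 1], [6, 5, 4]].
    +    out <- runSession $ run $
    +        CoreOps.reverse (constant (Shape [2, 3]) [1 .. 6 :: Float])
    +                        (constant (Shape [2]) [False, True])
    +    print (out :: V.Vector Float)
    +-}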
    +
    +-- | Computes a 2-D convolution given 4-D `input` and `filter` tensors.
    +--
    +-- Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
    +-- and a filter / kernel tensor of shape
    +-- `[filter_height, filter_width, in_channels, out_channels]`, this op
    +-- performs the following:
    +-- 
    +-- 1. Flattens the filter to a 2-D matrix with shape
    +--    `[filter_height * filter_width * in_channels, output_channels]`.
    +-- 2. Extracts image patches from the input tensor to form a *virtual*
    +--    tensor of shape `[batch, out_height, out_width,
    +--    filter_height * filter_width * in_channels]`.
    +-- 3. For each patch, right-multiplies the filter matrix and the image patch
    +--    vector.
    +-- 
    +-- In detail, with the default NHWC format,
    +-- 
    +--     output[b, i, j, k] =
    +--         sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
    +--                         filter[di, dj, q, k]
    +-- 
    +-- Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
    +-- horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
    +conv2D :: forall v1 v2 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                                  Float] t) =>
    +          Tensor v1 t -- ^ __input__
    +          -> Tensor v2 t -- ^ __filter__
    +          -> Tensor Value t -- ^ __output__
    +conv2D input filter | eqLengthGuard [] =
    +    buildOp (opDef "Conv2D"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format."
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  default_value { b: true } name: "use_cudnn_on_gpu" type: "bool"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values { list { s: "NHWC" s: "NCHW" } }
    +  default_value { s: "NHWC" }
    +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    +  name: "data_format"
    +  type: "string"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +input_arg { name: "filter" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
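    +
    +-- The required `strides` and `padding` attrs are not exposed as arguments of
    +-- the generated signature above. As a worked example of the spatial-size
    +-- rules only, here is a sketch of the usual output-extent arithmetic
    +-- (`validOut`/`sameOut` are hypothetical helpers, not part of this package):
    +{-
    +-- Output extent of one spatial dimension under each padding algorithm.
    +validOut, sameOut :: Int -> Int -> Int -> Int
    +validOut inDim filterDim stride = (inDim - filterDim) `div` stride + 1
    +sameOut inDim _filterDim stride = (inDim + stride - 1) `div` stride
    +
    +-- e.g. validOut 4 2 1 == 3: a `[1, 4, 4, 1]` input convolved with a
    +-- `[2, 2, 1, 1]` filter at strides `[1, 1, 1, 1]` and VALID padding gives a
    +-- `[1, 3, 3, 1]` output.
    +-}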
    +
    +-- | Computes the gradients of convolution with respect to the input.
    +
    +conv2DBackpropInput :: forall v1 v2 v3 t . (TensorType t,
    +                                            OneOf '[Data.Word.Word16, Double,
    +                                                    Float] t) =>
    +                       Tensor v1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`,
    +                                                -- where `input` is a 4-D `[batch, height, width, channels]` tensor.
    +                       -> Tensor v2 t -- ^ __filter__: 4-D with shape
    +                                      -- `[filter_height, filter_width, in_channels, out_channels]`.
    +                       -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
    +                                      -- Gradients w.r.t. the output of the convolution.
    +                       -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
    +                       -- w.r.t. the input of the convolution.
    +conv2DBackpropInput input_sizes filter out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "Conv2DBackpropInput"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input_sizes filter out_backprop
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat."
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  default_value { b: true } name: "use_cudnn_on_gpu" type: "bool"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values { list { s: "NHWC" s: "NCHW" } }
    +  default_value { s: "NHWC" }
    +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    +  name: "data_format"
    +  type: "string"
    +}
    +input_arg {
    +  description: "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor."
    +  name: "input_sizes"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
    +  name: "filter"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient\nw.r.t. the input of the convolution."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Produce a string tensor that encodes the state of a Reader.
    +--
    +-- Not all Readers support being serialized, so this can produce an
    +-- Unimplemented error.
    +readerSerializeState :: Tensor v1 Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
    +                        -> Tensor Value Data.ByteString.ByteString -- ^ __state__
    +readerSerializeState reader_handle | eqLengthGuard [] =
    +    buildOp (opDef "ReaderSerializeState")
    +        reader_handle
    +{-
    +input_arg {
    +  description: "Handle to a Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +output_arg { name: "state" type: DT_STRING }
    +-}
    +
    +-- | Returns a tensor that may be mutated, but only persists within a single step.
    +--
    +-- This is an experimental op for internal use only and it is possible to use this
    +-- op in unsafe ways.  DO NOT USE unless you fully understand the risks.
    +-- 
    +-- It is the caller's responsibility to ensure that 'ref' is eventually passed to a
    +-- matching 'DestroyTemporaryVariable' op after all other uses have completed.
    +-- 
    +-- Outputs a ref to the tensor state so it may be read or modified.
    +-- 
    +--   E.g.
    +--       var = state_ops._temporary_variable([1, 2], types.float_)
    +--       var_name = var.op.name
    +--       var = state_ops.assign(var, [[4.0, 5.0]])
    +--       var = state_ops.assign_add(var, [[6.0, 7.0]])
    +--       final = state_ops._destroy_temporary_variable(var, var_name=var_name)
    +temporaryVariable :: forall dtype . (TensorType dtype) =>
    +                     Tensor Value dtype -- ^ __ref__: A reference to the variable tensor.
    +temporaryVariable  | eqLengthGuard [] =
    +    buildOp (opDef "TemporaryVariable"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        
    +{-
    +attr {
    +  description: "The shape of the variable tensor."
    +  name: "shape"
    +  type: "shape"
    +}
    +attr {
    +  description: "The type of elements in the variable tensor."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "Overrides the name used for the temporary variable resource. Default\nvalue is the name of the \'TemporaryVariable\' op (which is guaranteed unique)."
    +  name: "var_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "A reference to the variable tensor."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "dtype"
    +}
    +-}
    +
    +-- | Extracts crops from the input image tensor and bilinearly resizes them.
    +--
    +-- Crops are resized (possibly with aspect ratio change) to a common output
    +-- size specified by `crop_size`. This is more general than the
    +-- `crop_to_bounding_box` op, which extracts a fixed-size slice from the input
    +-- image and does not allow resizing or aspect ratio change.
    +-- 
    +-- Returns a tensor with `crops` from the input `image` at positions defined at the
    +-- bounding box locations in `boxes`. The cropped boxes are all resized (with
    +-- bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
    +-- result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.
    +cropAndResize :: forall v1 v2 v3 v4 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                               Data.Int.Int32,
    +                                                               Data.Int.Int64,
    +                                                               Data.Int.Int8,
    +                                                               Data.Word.Word16,
    +                                                               Data.Word.Word8,
    +                                                               Double,
    +                                                               Float] t) =>
    +                 Tensor v1 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
    +                             -- Both `image_height` and `image_width` need to be positive.
    +                 -> Tensor v2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
    +                                    -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
    +                                    -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
    +                                    -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
    +                                    -- `[0, 1]` interval of normalized image height is mapped to
    +                                    -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
    +                                    -- which case the sampled crop is an up-down flipped version of the original
    +                                    -- image. The width dimension is treated similarly. Normalized coordinates
    +                                    -- outside the `[0, 1]` range are allowed, in which case we use
    +                                    -- `extrapolation_value` to extrapolate the input image values.
    +                 -> Tensor v3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
    +                                             -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
    +                 -> Tensor v4 Data.Int.Int32 -- ^ __crop_size__: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
    +                                             -- cropped image patches are resized to this size. The aspect ratio of the image
    +                                             -- content is not preserved. Both `crop_height` and `crop_width` need to be
    +                                             -- positive.
    +                 -> Tensor Value Float -- ^ __crops__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
    +cropAndResize image boxes box_ind crop_size | eqLengthGuard [] =
    +    buildOp (opDef "CropAndResize"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        image boxes box_ind crop_size
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { s: "bilinear" } }
    +  default_value { s: "bilinear" }
    +  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
    +  name: "method"
    +  type: "string"
    +}
    +attr {
    +  default_value { f: 0.0 }
    +  description: "Value used for extrapolation, when applicable."
    +  name: "extrapolation_value"
    +  type: "float"
    +}
    +input_arg {
    +  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive."
    +  name: "image"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
    +  name: "boxes"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
    +  name: "box_ind"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All\ncropped image patches are resized to this size. The aspect ratio of the image\ncontent is not preserved. Both `crop_height` and `crop_width` need to be\npositive."
    +  name: "crop_size"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
    +  name: "crops"
    +  type: DT_FLOAT
    +}
    +-}
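    +
    +-- A minimal sketch under the same assumptions as the earlier ones
    +-- (`constant`, `run`/`runSession`, Vector fetch):
    +{-
    +import Data.Int (Int32)
    +import qualified Data.Vector as V
    +import TensorFlow.GenOps.Core (cropAndResize)
    +import TensorFlow.Ops (constant)
    +import TensorFlow.Session (run, runSession)
    +import TensorFlow.Types (Shape (..))
    +
    +main :: IO ()
    +main = do
    +    -- One box covering the whole 4x4 single-channel image (normalized
    +    -- coordinates [y1, x1, y2, x2] = [0, 0, 1, 1]), resized to 2x2.
    +    crops <- runSession $ run $
    +        cropAndResize (constant (Shape [1, 4, 4, 1]) [1 .. 16 :: Float])
    +                      (constant (Shape [1, 4]) [0, 0, 1, 1])
    +                      (constant (Shape [1]) [0 :: Int32])
    +                      (constant (Shape [2]) [2, 2 :: Int32])
    +    print (crops :: V.Vector Float)
    +-}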
    +
    +-- | Computes gradients of the maxpooling function.
    +
    +maxPoolGrad :: forall v1 v2 v3 t . (TensorType t, OneOf '[Data.Word.Word16,
    +                                                          Float] t) =>
    +               Tensor v1 t -- ^ __orig_input__: The original input tensor.
    +               -> Tensor v2 t -- ^ __orig_output__: The original output tensor.
    +               -> Tensor v3 t -- ^ __grad__: 4-D.  Gradients w.r.t. the output of `max_pool`.
    +               -> Tensor Value t -- ^ __output__: Gradients w.r.t. the input to `max_pool`.
    +maxPoolGrad orig_input orig_output grad | eqLengthGuard [] =
    +    buildOp (opDef "MaxPoolGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        orig_input orig_output grad
    +{-
    +attr {
    +  description: "The size of the window for each dimension of the input tensor."
    +  has_minimum: true
    +  minimum: 4
    +  name: "ksize"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of the\ninput tensor."
    +  has_minimum: true
    +  minimum: 4
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values { list { s: "NHWC" s: "NCHW" } }
    +  default_value { s: "NHWC" }
    +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    +  name: "data_format"
    +  type: "string"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The original input tensor."
    +  name: "orig_input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The original output tensor."
    +  name: "orig_output"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "4-D.  Gradients w.r.t. the output of `max_pool`."
    +  name: "grad"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Gradients w.r.t. the input to `max_pool`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Performs a resize and padding as a preprocess during a convolution.
    +--
    +-- It's often possible to do spatial transformations more efficiently as part of
    +-- the packing stage of a convolution, so this op allows for an optimized
    +-- implementation where these stages are fused together. This prevents the need to
    +-- write out the intermediate results as whole tensors, reducing memory pressure,
    +-- and we can get some latency gains by merging the transformation calculations.
    +-- The data_format attribute for Conv2D isn't supported by this op, and defaults to
    +-- 'NHWC' order.
    +-- Internally this op uses a single per-graph scratch buffer, which means that it
    +-- will block if multiple versions are being run in parallel. This is because this
    +-- operator is primarily an optimization to minimize memory usage.
    +fusedResizeAndPadConv2D :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                                   OneOf '[Data.Word.Word16,
    +                                                           Double, Float] t) =>
    +                           Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
    +                           -> Tensor v2 Data.Int.Int32 -- ^ __size__: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
    +                                                       -- new size for the images.
    +                           -> Tensor v3 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
    +                                                       -- rows must be the same as the rank of `input`.
    +                           -> Tensor v4 t -- ^ __filter__: 4-D with shape
    +                                          -- `[filter_height, filter_width, in_channels, out_channels]`.
    +                           -> Tensor Value t -- ^ __output__
    +fusedResizeAndPadConv2D input size paddings filter | eqLengthGuard [] =
    +    buildOp (opDef "FusedResizeAndPadConv2D"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input size paddings filter
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, rescale input by (new_height - 1) / (height - 1),\nwhich exactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
    +  name: "resize_align_corners"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
    +  name: "mode"
    +  type: "string"
    +}
    +attr {
    +  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format."
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
    +  name: "size"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
    +  name: "paddings"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
    +  name: "filter"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Outputs random values from a uniform distribution.
    +--
    +-- The generated values follow a uniform distribution in the range `[0, 1)`. The
    +-- lower bound 0 is included in the range, while the upper bound 1 is excluded.
    +randomUniform :: forall v1 t dtype . (TensorType t, OneOf '[Data.Int.Int32,
    +                                                            Data.Int.Int64] t,
    +                                      TensorType dtype,
    +                                      OneOf '[Data.Word.Word16, Double,
    +                                              Float] dtype) =>
    +                 Tensor v1 t -- ^ __shape__: The shape of the output tensor.
    +                 -> Tensor Value dtype -- ^ __output__: A tensor of the specified shape filled with uniform random values.
    +randomUniform shape | eqLengthGuard [] =
    +    buildOp (opDef "RandomUniform"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        shape
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  description: "The type of the output."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The shape of the output tensor."
    +  name: "shape"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "A tensor of the specified shape filled with uniform random values."
    +  name: "output"
    +  type_attr: "dtype"
    +}
    +-}
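    +
    +-- A minimal sketch under the same assumptions as the earlier ones
    +-- (`constant`, `run`/`runSession`, Vector fetch); the output `dtype` is
    +-- picked by the fetch annotation:
    +{-
    +import Data.Int (Int32)
    +import qualified Data.Vector as V
    +import TensorFlow.GenOps.Core (randomUniform)
    +import TensorFlow.Ops (constant)
    +import TensorFlow.Session (run, runSession)
    +import TensorFlow.Types (Shape (..))
    +
    +main :: IO ()
    +main = do
    +    -- Six draws from [0, 1), materialized as a 2x3 tensor.
    +    xs <- runSession $ run $
    +        randomUniform (constant (Shape [2]) [2, 3 :: Int32])
    +    print (xs :: V.Vector Float)
    +-}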
    +
    +-- | Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
    +--
    +-- Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
    +-- and a filter / kernel tensor of shape
    +-- `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
    +-- `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
    +-- a different filter to each input channel (expanding from 1 channel to
    +-- `channel_multiplier` channels for each), then concatenates the results
    +-- together. Thus, the output has `in_channels * channel_multiplier` channels.
    +-- 
    +-- for k in 0..in_channels-1
    +--   for q in 0..channel_multiplier-1
    +--     output[b, i, j, k * channel_multiplier + q] =
    +--       sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
    +--                         filter[di, dj, k, q]
    +-- 
    +-- Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
    +-- horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
    +depthwiseConv2dNative :: forall v1 v2 t . (TensorType t, OneOf '[Double,
    +                                                                 Float] t) =>
    +                         Tensor v1 t -- ^ __input__
    +                         -> Tensor v2 t -- ^ __filter__
    +                         -> Tensor Value t -- ^ __output__
    +depthwiseConv2dNative input filter | eqLengthGuard [] =
    +    buildOp (opDef "DepthwiseConv2dNative"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`."
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +input_arg { name: "filter" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
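    +
    +-- Worked channel arithmetic for the loop above: with `in_channels` = 2 and
    +-- `channel_multiplier` = 3, input channel k (k in 0..1) is filtered by
    +-- filter[:, :, k, q] for q in 0..2, and each result lands in output channel
    +-- k * 3 + q, for 2 * 3 = 6 output channels in total.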
    +
    +-- | var: Should be from a Variable().
    +
    +sparseApplyAdadelta :: forall v1 v2 v3 v4 v5 v6 v7 v8 t
    +                       tindices . (TensorType t,
    +                                   OneOf '[(Data.Complex.Complex Double),
    +                                           (Data.Complex.Complex Float),
    +                                           Data.Int.Int16, Data.Int.Int32,
    +                                           Data.Int.Int64, Data.Int.Int8,
    +                                           Data.Word.Word16, Data.Word.Word8,
    +                                           Double, Float] t,
    +                                   TensorType tindices, OneOf '[Data.Int.Int32,
    +                                                                Data.Int.Int64] tindices) =>
    +                       Tensor v1 t -- ^ __var__
    +                       -> Tensor v2 t -- ^ __accum__: Should be from a Variable().
    +                       -> Tensor v3 t -- ^ __accum_update__: Should be from a Variable().
    +                       -> Tensor v4 t -- ^ __lr__: Learning rate. Must be a scalar.
    +                       -> Tensor v5 t -- ^ __rho__: Decay factor. Must be a scalar.
    +                       -> Tensor v6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
    +                       -> Tensor v7 t -- ^ __grad__: The gradient.
    +                       -> Tensor v8 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
    +                       -> Tensor Value t -- ^ __out__: Same as "var".
    +sparseApplyAdadelta var accum accum_update lr rho epsilon grad
    +                    indices | eqLengthGuard [] =
    +    buildOp (opDef "SparseApplyAdadelta"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        var accum accum_update lr rho epsilon grad indices
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg { is_ref: true name: "var" type_attr: "T" }
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: ": Should be from a Variable()."
    +  is_ref: true
    +  name: "accum_update"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Learning rate. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Decay factor. Must be a scalar."
    +  name: "rho"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Constant factor. Must be a scalar."
    +  name: "epsilon"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "A vector of indices into the first dimension of var and accum."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the gradients of depthwise convolution with respect to the filter.
    +
    +depthwiseConv2dNativeBackpropFilter :: forall v1 v2 v3 t . (TensorType t,
    +                                                            OneOf '[Double,
    +                                                                    Float] t) =>
    +                                       Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
    +                                       -> Tensor v2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
    +                                                                   -- where `filter` is a 4-D
    +                                                                   -- `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
    +                                       -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
    +                                                      -- Gradients w.r.t. the output of the convolution.
    +                                       -> Tensor Value t -- ^ __output__: 4-D with shape
    +                                       -- `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
    +                                       -- the `filter` input of the convolution.
    +depthwiseConv2dNativeBackpropFilter input filter_sizes
    +                                    out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "DepthwiseConv2dNativeBackpropFilter"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter_sizes out_backprop
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of the input\nof the convolution."
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor."
    +  name: "filter_sizes"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.\nthe `filter` input of the convolution."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes a 3-D convolution given 5-D `input` and `filter` tensors.
    +--
    +-- In signal processing, cross-correlation is a measure of similarity of
    +-- two waveforms as a function of a time-lag applied to one of them. This
    +-- is also known as a sliding dot product or sliding inner-product.
    +-- 
    +-- Our Conv3D implements a form of cross-correlation.
    +conv3D :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                                  (Data.Complex.Complex Float),
    +                                                  Data.Int.Int16,
    +                                                  Data.Int.Int32,
    +                                                  Data.Int.Int64, Data.Int.Int8,
    +                                                  Data.Word.Word16,
    +                                                  Data.Word.Word8, Double,
    +                                                  Float] t) =>
    +          Tensor v1 t -- ^ __input__: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
    +          -> Tensor v2 t -- ^ __filter__: Shape `[filter_depth, filter_height, filter_width, in_channels,
    +                         -- out_channels]`. `in_channels` must match between `input` and `filter`.
    +          -> Tensor Value t -- ^ __output__
    +conv3D input filter | eqLengthGuard [] =
    +    buildOp (opDef "Conv3D"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "Shape `[batch, in_depth, in_height, in_width, in_channels]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Shape `[filter_depth, filter_height, filter_width, in_channels,\nout_channels]`. `in_channels` must match between `input` and `filter`."
    +  name: "filter"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Returns the truth value of (x >= y) element-wise.
    +--
    +-- *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +greaterEqual :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                        Data.Int.Int32,
    +                                                        Data.Int.Int64,
    +                                                        Data.Int.Int8,
    +                                                        Data.Word.Word16,
    +                                                        Data.Word.Word8, Double,
    +                                                        Float] t) =>
    +                Tensor v1 t -- ^ __x__
    +                -> Tensor v2 t -- ^ __y__
    +                -> Tensor Value Bool -- ^ __z__
    +greaterEqual x y | eqLengthGuard [] =
    +    buildOp (opDef "GreaterEqual"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type: DT_BOOL }
    +-}
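    +
    +-- A minimal sketch of the broadcasting behaviour, under the same assumptions
    +-- as the earlier ones (`constant`, `run`/`runSession`, Vector fetch):
    +{-
    +import qualified Data.Vector as V
    +import TensorFlow.GenOps.Core (greaterEqual)
    +import TensorFlow.Ops (constant)
    +import TensorFlow.Session (run, runSession)
    +import TensorFlow.Types (Shape (..))
    +
    +main :: IO ()
    +main = do
    +    -- A length-3 `x` against a length-1 `y`, which broadcasts:
    +    -- [1, 2, 3] >= [2] ==> [False, True, True].
    +    z <- runSession $ run $
    +        greaterEqual (constant (Shape [3]) [1, 2, 3 :: Float])
    +                     (constant (Shape [1]) [2])
    +    print (z :: V.Vector Bool)
    +-}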
    +
    +-- | Adds up a SparseTensor and a dense Tensor, using these special rules:
    +--
    +-- (1) Broadcasts the dense side to have the same shape as the sparse side, if
    +--     eligible;
    +-- (2) Then, only the dense values pointed to by the indices of the SparseTensor
    +--     participate in the cwise addition.
    +-- 
    +-- By these rules, the result is a logical SparseTensor with exactly the same
    +-- indices and shape, but possibly with different non-zero values.  The output of
    +-- this Op is the resultant non-zero values.
    +sparseDenseCwiseAdd :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                               OneOf '[(Data.Complex.Complex Double),
    +                                                       (Data.Complex.Complex Float),
    +                                                       Data.Int.Int16,
    +                                                       Data.Int.Int32,
    +                                                       Data.Int.Int64,
    +                                                       Data.Int.Int8,
    +                                                       Data.Word.Word16,
    +                                                       Data.Word.Word8, Double,
    +                                                       Float] t) =>
    +                       Tensor v1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
    +                                                -- SparseTensor, possibly not in canonical ordering.
    +                       -> Tensor v2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
    +                       -> Tensor v3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
    +                       -> Tensor v4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
    +                       -> Tensor Value t -- ^ __output__: 1-D.  The `N` values that are operated on.
    +sparseDenseCwiseAdd sp_indices sp_values sp_shape dense | eqLengthGuard [] =
    +    buildOp (opDef "SparseDenseCwiseAdd"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        sp_indices sp_values sp_shape dense
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
    +  name: "sp_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
    +  name: "sp_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the input SparseTensor."
    +  name: "sp_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "`R`-D.  The dense Tensor operand."
    +  name: "dense"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1-D.  The `N` values that are operated on."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
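    +
    +-- A minimal sketch under the same assumptions as the earlier ones
    +-- (`constant`, `run`/`runSession`, Vector fetch):
    +{-
    +import Data.Int (Int64)
    +import qualified Data.Vector as V
    +import TensorFlow.GenOps.Core (sparseDenseCwiseAdd)
    +import TensorFlow.Ops (constant)
    +import TensorFlow.Session (run, runSession)
    +import TensorFlow.Types (Shape (..))
    +
    +main :: IO ()
    +main = do
    +    -- Sparse values 1 and 2 at indices [0,0] and [1,2] of a 2x3 tensor,
    +    -- added to a dense 2x3 tensor of all 10s ==> output values [11, 12].
    +    out <- runSession $ run $
    +        sparseDenseCwiseAdd (constant (Shape [2, 2]) [0, 0, 1, 2 :: Int64])
    +                            (constant (Shape [2]) [1, 2 :: Float])
    +                            (constant (Shape [2]) [2, 3 :: Int64])
    +                            (constant (Shape [2, 3]) (replicate 6 10))
    +    print (out :: V.Vector Float)
    +-}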
    +
    +-- | Computes the gradients of 3-D convolution with respect to the filter.
    +
    +conv3DBackpropFilter :: forall v1 v2 v3 t . (TensorType t,
    +                                             OneOf '[(Data.Complex.Complex Double),
    +                                                     (Data.Complex.Complex Float),
    +                                                     Data.Int.Int16,
    +                                                     Data.Int.Int32,
    +                                                     Data.Int.Int64,
    +                                                     Data.Int.Int8,
    +                                                     Data.Word.Word16,
    +                                                     Data.Word.Word8, Double,
    +                                                     Float] t) =>
    +                        Tensor v1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
    +                        -> Tensor v2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
    +                                       -- `in_channels` must match between `input` and `filter`.
    +                        -> Tensor v3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
    +                                       -- out_channels]`.
    +                        -> Tensor Value t -- ^ __output__
    +conv3DBackpropFilter input filter out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "Conv3DBackpropFilter"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter out_backprop
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "Shape `[batch, depth, rows, cols, in_channels]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
    +  name: "filter"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Computes the gradients of 3-D convolution with respect to the input.
    +
    +conv3DBackpropInputV2 :: forall v1 v2 v3 t . (TensorType t,
    +                                              OneOf '[(Data.Complex.Complex Double),
    +                                                      (Data.Complex.Complex Float),
    +                                                      Data.Int.Int16,
    +                                                      Data.Int.Int32,
    +                                                      Data.Int.Int64,
    +                                                      Data.Int.Int8,
    +                                                      Data.Word.Word16,
    +                                                      Data.Word.Word8, Double,
    +                                                      Float] t) =>
    +                         Tensor v1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the tensor shape of `input`,
    +                                                  -- where `input` is a 5-D
    +                                                  -- `[batch, depth, rows, cols, in_channels]` tensor.
    +                         -> Tensor v2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
    +                                        -- `in_channels` must match between `input` and `filter`.
    +                         -> Tensor v3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
    +                                        -- out_channels]`.
    +                         -> Tensor Value t -- ^ __output__
    +conv3DBackpropInputV2 input_sizes filter out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "Conv3DBackpropInputV2"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input_sizes filter out_backprop
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "An integer vector representing the tensor shape of `input`,\nwhere `input` is a 5-D\n`[batch, depth, rows, cols, in_channels]` tensor."
    +  name: "input_sizes"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
    +  name: "filter"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Returns element-wise remainder of division.
    +--
    +-- *NOTE*: `Mod` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +mod :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64,
    +                                               Double, Float] t) =>
    +       Tensor v1 t -- ^ __x__
    +       -> Tensor v2 t -- ^ __y__
    +       -> Tensor Value t -- ^ __z__
    +mod x y | eqLengthGuard [] =
    +    buildOp (opDef "Mod"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
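+
+-- The following is a plain-Haskell sketch (not a generated op) of the
+-- scalar-vs-vector broadcasting case the note above refers to; `rem` stands
+-- in for the remainder here, without asserting the op's sign convention for
+-- negative operands.
+modBroadcastScalar :: [Int] -> Int -> [Int]
+modBroadcastScalar xs y = map (`rem` y) xs
+-- e.g. modBroadcastScalar [5, 7, 9] 4 == [1, 3, 1]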
    +
    +-- | Forwards the value of an available tensor from `inputs` to `output`.
    +--
    +-- `Merge` waits for at least one of the tensors in `inputs` to become available.
    +-- It is usually combined with `Switch` to implement branching.
    +-- 
+-- `Merge` forwards the first tensor to become available to `output`, and sets
    +-- `value_index` to its index in `inputs`.
    +refMerge :: forall v1 t . (TensorType t) =>
    +            [Tensor v1 t] -- ^ __inputs__: The input tensors, exactly one of which will become available.
    +            -> (Tensor Value t, Tensor Value Data.Int.Int32)
    +            -- ^ (__output__, __value_index__)
    +            --
    +            -- * __output__: Will be set to the available input tensor.
    +            --
    +            -- * __value_index__: The index of the chosen input tensor in `inputs`.
    +refMerge inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
    +    buildOp (opDef "RefMerge"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "N" .~ (fromIntegral (length inputs) :: Int64))
    +        inputs
    +{-
    +attr { name: "T" type: "type" }
    +attr { has_minimum: true minimum: 1 name: "N" type: "int" }
    +input_arg {
    +  description: "The input tensors, exactly one of which will become available."
    +  is_ref: true
    +  name: "inputs"
    +  number_attr: "N"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Will be set to the available input tensor."
    +  is_ref: true
    +  name: "output"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The index of the chosen input tensor in `inputs`."
    +  name: "value_index"
    +  type: DT_INT32
    +}
    +-}
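+
+-- A pure sketch (not a generated op) of the selection rule described above:
+-- the first available input wins, and its index is reported alongside it.
+-- Availability is modeled here with `Maybe`.
+mergeFirstAvailable :: [Maybe a] -> Maybe (a, Int)
+mergeFirstAvailable inputs =
+    case [ (x, i) | (Just x, i) <- zip inputs [0 ..] ] of
+        []      -> Nothing
+        (p : _) -> Just p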
    +
    +-- | Computes the gradients of 3-D convolution with respect to the filter.
    +
    +conv3DBackpropFilterV2 :: forall v1 v2 v3 t . (TensorType t,
    +                                               OneOf '[(Data.Complex.Complex Double),
    +                                                       (Data.Complex.Complex Float),
    +                                                       Data.Int.Int16,
    +                                                       Data.Int.Int32,
    +                                                       Data.Int.Int64,
    +                                                       Data.Int.Int8,
    +                                                       Data.Word.Word16,
    +                                                       Data.Word.Word8, Double,
    +                                                       Float] t) =>
    +                          Tensor v1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
    +                          -> Tensor v2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
    +                                                      -- where `filter` is a 5-D
    +                                                      -- `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
    +                                                      -- tensor.
    +                          -> Tensor v3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
    +                                         -- out_channels]`.
    +                          -> Tensor Value t -- ^ __output__
    +conv3DBackpropFilterV2 input filter_sizes out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "Conv3DBackpropFilterV2"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter_sizes out_backprop
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "Shape `[batch, depth, rows, cols, in_channels]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 5-D\n`[filter_depth, filter_height, filter_width, in_channels, out_channels]`\ntensor."
    +  name: "filter_sizes"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.
    +--
    +-- The `SparseTensor` must have rank `R` greater than 1, and the first dimension
    +-- is treated as the minibatch dimension.  Elements of the `SparseTensor`
    +-- must be sorted in increasing order of this first dimension.  The serialized
    +-- `SparseTensor` objects going into each row of `serialized_sparse` will have
    +-- rank `R-1`.
    +-- 
    +-- The minibatch size `N` is extracted from `sparse_shape[0]`.
    +serializeManySparse :: forall v1 v2 v3 t . (TensorType t) =>
    +                       Tensor v1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.
    +                       -> Tensor v2 t -- ^ __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.
    +                       -> Tensor v3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.
    +                       -> Tensor Value Data.ByteString.ByteString -- ^ __serialized_sparse__
    +serializeManySparse sparse_indices sparse_values
    +                    sparse_shape | eqLengthGuard [] =
    +    buildOp (opDef "SerializeManySparse"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        sparse_indices sparse_values sparse_shape
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "2-D.  The `indices` of the minibatch `SparseTensor`."
    +  name: "sparse_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  The `values` of the minibatch `SparseTensor`."
    +  name: "sparse_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  The `shape` of the minibatch `SparseTensor`."
    +  name: "sparse_shape"
    +  type: DT_INT64
    +}
    +output_arg { name: "serialized_sparse" type: DT_STRING }
    +-}
    +
    +-- | Computes gradients of average pooling function.
    +
    +avgPool3DGrad :: forall v1 v2 t . (TensorType t,
    +                                   OneOf '[(Data.Complex.Complex Double),
    +                                           (Data.Complex.Complex Float),
    +                                           Data.Int.Int16, Data.Int.Int32,
    +                                           Data.Int.Int64, Data.Int.Int8,
    +                                           Data.Word.Word16, Data.Word.Word8,
    +                                           Double, Float] t) =>
    +                 Tensor v1 Data.Int.Int32 -- ^ __orig_input_shape__: The original input dimensions.
    +                 -> Tensor v2 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.
    +                 -> Tensor Value t -- ^ __output__: The backprop for input.
    +avgPool3DGrad orig_input_shape grad | eqLengthGuard [] =
    +    buildOp (opDef "AvgPool3DGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        orig_input_shape grad
    +{-
    +attr {
    +  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "ksize"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The original input dimensions."
    +  name: "orig_input_shape"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "Output backprop of shape `[batch, depth, rows, cols, channels]`."
    +  name: "grad"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The backprop for input."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes gradients of max pooling function.
    +
    +maxPool3DGrad :: forall v1 v2 v3 t . (TensorType t,
    +                                      OneOf '[(Data.Complex.Complex Double),
    +                                              (Data.Complex.Complex Float),
    +                                              Data.Int.Int16, Data.Int.Int32,
    +                                              Data.Int.Int64, Data.Int.Int8,
    +                                              Data.Word.Word16, Data.Word.Word8,
    +                                              Double, Float] t) =>
    +                 Tensor v1 Float -- ^ __orig_input__: The original input tensor.
    +                 -> Tensor v2 Float -- ^ __orig_output__: The original output tensor.
    +                 -> Tensor v3 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.
    +                 -> Tensor Value t -- ^ __output__
    +maxPool3DGrad orig_input orig_output grad | eqLengthGuard [] =
    +    buildOp (opDef "MaxPool3DGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        orig_input orig_output grad
    +{-
    +attr {
    +  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "ksize"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The original input tensor."
    +  name: "orig_input"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "The original output tensor."
    +  name: "orig_output"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "Output backprop of shape `[batch, depth, rows, cols, channels]`."
    +  name: "grad"
    +  type_attr: "T"
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Computes the sum of elements across dimensions of a SparseTensor.
    +--
    +-- This Op takes a SparseTensor and is the sparse counterpart to
    +-- `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
    +-- instead of a sparse one.
    +-- 
    +-- Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
    +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
    +-- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
    +-- with length 1.
    +-- 
    +-- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+-- with a single element is returned.  Additionally, the axes can be negative,
+-- in which case they are interpreted according to Python's indexing rules.
    +sparseReduceSum :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                           OneOf '[(Data.Complex.Complex Double),
    +                                                   (Data.Complex.Complex Float),
    +                                                   Data.Int.Int16,
    +                                                   Data.Int.Int32,
    +                                                   Data.Int.Int64,
    +                                                   Data.Int.Int8,
    +                                                   Data.Word.Word16,
    +                                                   Data.Word.Word8, Double,
    +                                                   Float] t) =>
    +                   Tensor v1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
    +                                            -- SparseTensor, possibly not in canonical ordering.
    +                   -> Tensor v2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
    +                   -> Tensor v3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
    +                   -> Tensor v4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
    +                   -> Tensor Value t -- ^ __output__: `R-K`-D.  The reduced Tensor.
    +sparseReduceSum input_indices input_values input_shape
    +                reduction_axes | eqLengthGuard [] =
    +    buildOp (opDef "SparseReduceSum"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input_indices input_values input_shape reduction_axes
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, retain reduced dimensions with length 1."
    +  name: "keep_dims"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
    +  name: "input_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
    +  name: "input_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the input SparseTensor."
    +  name: "input_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  Length-`K` vector containing the reduction axes."
    +  name: "reduction_axes"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "`R-K`-D.  The reduced Tensor."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
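+
+-- A plain-Haskell sketch (not a generated op) of the reduction semantics for
+-- the simplest case: a rank-2 SparseTensor reduced over axis 1 with
+-- `keep_dims = false`, yielding a dense vector of row sums.
+sparseReduceSumAxis1 :: Int -> [((Int, Int), Double)] -> [Double]
+sparseReduceSumAxis1 nRows entries =
+    [ sum [ v | ((r, _c), v) <- entries, r == i ] | i <- [0 .. nRows - 1] ]
+-- e.g. sparseReduceSumAxis1 2 [((0,0),1), ((0,2),2), ((1,1),3)] == [3.0, 3.0]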
    +
    +-- | Computes rectified linear: `max(features, 0)`.
    +
    +relu :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
    +                                             Data.Int.Int64, Data.Int.Int8,
    +                                             Data.Word.Word16, Data.Word.Word8,
    +                                             Double, Float] t) =>
    +        Tensor v1 t -- ^ __features__
    +        -> Tensor Value t -- ^ __activations__
    +relu features | eqLengthGuard [] =
    +    buildOp (opDef "Relu"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        features
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "features" type_attr: "T" }
    +output_arg { name: "activations" type_attr: "T" }
    +-}
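+
+-- Pure reference (not a generated op) for the formula above, applied
+-- elementwise over a flattened tensor:
+reluRef :: [Double] -> [Double]
+reluRef = map (max 0)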
    +
    +-- | L2 Loss.
    +--
    +-- Computes half the L2 norm of a tensor without the `sqrt`:
    +-- 
    +--     output = sum(t ** 2) / 2
    +l2Loss :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                               (Data.Complex.Complex Float),
    +                                               Data.Int.Int16, Data.Int.Int32,
    +                                               Data.Int.Int64, Data.Int.Int8,
    +                                               Data.Word.Word16,
    +                                               Data.Word.Word8, Double,
    +                                               Float] t) =>
    +          Tensor v1 t -- ^ __t__: Typically 2-D, but may have any dimensions.
    +          -> Tensor Value t -- ^ __output__: 0-D.
    +l2Loss t | eqLengthGuard [] =
    +    buildOp (opDef "L2Loss"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        t
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Typically 2-D, but may have any dimensions."
    +  name: "t"
    +  type_attr: "T"
    +}
    +output_arg { description: "0-D." name: "output" type_attr: "T" }
    +-}
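+
+-- Pure reference (not a generated op) for `output = sum(t ** 2) / 2` over a
+-- flattened tensor:
+l2LossRef :: [Double] -> Double
+l2LossRef t = sum (map (^ (2 :: Int)) t) / 2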
    +
    +-- | Restore a reader to a previously saved state.
    +--
    +-- Not all Readers support being restored, so this can produce an
    +-- Unimplemented error.
    +readerRestoreState :: Tensor v1 Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
    +                      -> Tensor v2 Data.ByteString.ByteString -- ^ __state__: Result of a ReaderSerializeState of a Reader with type
    +                                                              -- matching reader_handle.
    +                      -> ControlNode
    +readerRestoreState reader_handle state | eqLengthGuard [] =
    +    buildOp (opDef "ReaderRestoreState")
    +        reader_handle state
    +{-
    +input_arg {
    +  description: "Handle to a Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle."
    +  name: "state"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Returns the shape of a tensor.
    +--
    +-- This operation returns a 1-D integer tensor representing the shape of `input`.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    +-- shape(t) ==> [2, 2, 3]
    +-- ```
    +shape :: forall v1 t out_type . (TensorType t, TensorType out_type,
    +                                 OneOf '[Data.Int.Int32,
    +                                         Data.Int.Int64] out_type) =>
    +         Tensor v1 t -- ^ __input__
    +         -> Tensor Value out_type -- ^ __output__
    +shape input | eqLengthGuard [] =
    +    buildOp (opDef "Shape"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "out_type" .~ tensorType (undefined :: out_type))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "out_type"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "out_type" }
    +-}
    +
    +-- | Computes softmax cross entropy cost and gradients to backpropagate.
    +--
    +-- Inputs are the logits, not probabilities.
    +softmaxCrossEntropyWithLogits :: forall v1 v2 t . (TensorType t,
    +                                                   OneOf '[Data.Word.Word16,
    +                                                           Double, Float] t) =>
    +                                 Tensor v1 t -- ^ __features__: batch_size x num_classes matrix
    +                                 -> Tensor v2 t -- ^ __labels__: batch_size x num_classes matrix
    +                                                -- The caller must ensure that each batch of labels represents a valid
    +                                                -- probability distribution.
    +                                 -> (Tensor Value t, Tensor Value t)
    +                                 -- ^ (__loss__, __backprop__)
    +                                 --
    +                                 -- * __loss__: Per example loss (batch_size vector).
    +                                 --
    +                                 -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix).
    +softmaxCrossEntropyWithLogits features labels | eqLengthGuard [] =
    +    buildOp (opDef "SoftmaxCrossEntropyWithLogits"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        features labels
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "batch_size x num_classes matrix"
    +  name: "features"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "batch_size x num_classes matrix\nThe caller must ensure that each batch of labels represents a valid\nprobability distribution."
    +  name: "labels"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Per example loss (batch_size vector)."
    +  name: "loss"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "backpropagated gradients (batch_size x num_classes matrix)."
    +  name: "backprop"
    +  type_attr: "T"
    +}
    +-}
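+
+-- Pure sketch (not a generated op) of the standard definition for one batch
+-- row: the loss is the negative dot product of `labels` with the log-softmax
+-- of `features`. The op additionally returns the backprop.
+softmaxXentRow :: [Double] -> [Double] -> Double
+softmaxXentRow logits labels = negate (sum (zipWith (*) labels logSoft))
+  where
+    logZ    = log (sum (map exp logits))
+    logSoft = map (subtract logZ) logits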
    +
    +-- | Performs max pooling on the input.
    +
    +maxPool :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Float] t) =>
    +           Tensor v1 t -- ^ __input__: 4-D input to pool over.
    +           -> Tensor Value t -- ^ __output__: The max pooled output tensor.
    +maxPool input | eqLengthGuard [] =
    +    buildOp (opDef "MaxPool"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "The size of the window for each dimension of the input tensor."
    +  has_minimum: true
    +  minimum: 4
    +  name: "ksize"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of the\ninput tensor."
    +  has_minimum: true
    +  minimum: 4
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values { list { s: "NHWC" s: "NCHW" } }
    +  default_value { s: "NHWC" }
    +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    +  name: "data_format"
    +  type: "string"
    +}
    +input_arg {
    +  description: "4-D input to pool over." name: "input" type_attr: "T"
    +}
    +output_arg {
    +  description: "The max pooled output tensor."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the gradient of morphological 2-D dilation with respect to the input.
    +
    +dilation2DBackpropInput :: forall v1 v2 v3 t . (TensorType t,
    +                                                OneOf '[Data.Int.Int16,
    +                                                        Data.Int.Int32,
    +                                                        Data.Int.Int64,
    +                                                        Data.Int.Int8,
    +                                                        Data.Word.Word16,
    +                                                        Data.Word.Word8, Double,
    +                                                        Float] t) =>
    +                           Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
    +                           -> Tensor v2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
    +                           -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`.
    +                           -> Tensor Value t -- ^ __in_backprop__: 4-D with shape `[batch, in_height, in_width, depth]`.
    +dilation2DBackpropInput input filter out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "Dilation2DBackpropInput"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter out_backprop
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "rates"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, in_height, in_width, depth]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "3-D with shape `[filter_height, filter_width, depth]`."
    +  name: "filter"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, out_height, out_width, depth]`."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "4-D with shape `[batch, in_height, in_width, depth]`."
    +  name: "in_backprop"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Returns the truth value of (x == y) element-wise.
    +--
    +-- *NOTE*: `Equal` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +equal :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                                 (Data.Complex.Complex Float),
    +                                                 Bool,
    +                                                 Data.ByteString.ByteString,
    +                                                 Data.Int.Int16, Data.Int.Int32,
    +                                                 Data.Int.Int64, Data.Int.Int8,
    +                                                 Data.Word.Word16,
    +                                                 Data.Word.Word8, Double,
    +                                                 Float] t) =>
    +         Tensor v1 t -- ^ __x__
    +         -> Tensor v2 t -- ^ __y__
    +         -> Tensor Value Bool -- ^ __z__
    +equal x y | eqLengthGuard [] =
    +    buildOp (opDef "Equal"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_QUINT8
    +      type: DT_QINT8
    +      type: DT_QINT32
    +      type: DT_STRING
    +      type: DT_BOOL
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type: DT_BOOL }
    +-}
    +
    +-- | Computes the gradient of morphological 2-D dilation with respect to the filter.
    +
    +dilation2DBackpropFilter :: forall v1 v2 v3 t . (TensorType t,
    +                                                 OneOf '[Data.Int.Int16,
    +                                                         Data.Int.Int32,
    +                                                         Data.Int.Int64,
    +                                                         Data.Int.Int8,
    +                                                         Data.Word.Word16,
    +                                                         Data.Word.Word8,
    +                                                         Double, Float] t) =>
    +                            Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
    +                            -> Tensor v2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
    +                            -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`.
    +                            -> Tensor Value t -- ^ __filter_backprop__: 3-D with shape `[filter_height, filter_width, depth]`.
    +dilation2DBackpropFilter input filter out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "Dilation2DBackpropFilter"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter out_backprop
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`."
    +  has_minimum: true
    +  minimum: 4
    +  name: "rates"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, in_height, in_width, depth]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "3-D with shape `[filter_height, filter_width, depth]`."
    +  name: "filter"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, out_height, out_width, depth]`."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "3-D with shape `[filter_height, filter_width, depth]`."
    +  name: "filter_backprop"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes rectified linear gradients for a Relu operation.
    +
    +reluGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                    Data.Int.Int32,
    +                                                    Data.Int.Int64,
    +                                                    Data.Int.Int8,
    +                                                    Data.Word.Word16,
    +                                                    Data.Word.Word8, Double,
    +                                                    Float] t) =>
    +            Tensor v1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu operation.
    +            -> Tensor v2 t -- ^ __features__: The features passed as input to the corresponding Relu operation, OR
    +                           -- the outputs of that operation (both work equivalently).
    +            -> Tensor Value t -- ^ __backprops__: `gradients * (features > 0)`.
    +reluGrad gradients features | eqLengthGuard [] =
    +    buildOp (opDef "ReluGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        gradients features
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The backpropagated gradients to the corresponding Relu operation."
    +  name: "gradients"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The features passed as input to the corresponding Relu operation, OR\nthe outputs of that operation (both work equivalently)."
    +  name: "features"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "`gradients * (features > 0)`."
    +  name: "backprops"
    +  type_attr: "T"
    +}
    +-}
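+
+-- Pure reference (not a generated op) for `gradients * (features > 0)`,
+-- treating the comparison as a 0/1 indicator:
+reluGradRef :: [Double] -> [Double] -> [Double]
+reluGradRef = zipWith (\g f -> if f > 0 then g else 0)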
    +
    +-- | Computes rectified linear 6: `min(max(features, 0), 6)`.
    +
    +relu6 :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
    +                                              Data.Int.Int64, Data.Int.Int8,
    +                                              Data.Word.Word16, Data.Word.Word8,
    +                                              Double, Float] t) =>
    +         Tensor v1 t -- ^ __features__
    +         -> Tensor Value t -- ^ __activations__
    +relu6 features | eqLengthGuard [] =
    +    buildOp (opDef "Relu6"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        features
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "features" type_attr: "T" }
    +output_arg { name: "activations" type_attr: "T" }
    +-}
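+
+-- Pure reference (not a generated op) for `min(max(features, 0), 6)`:
+relu6Ref :: [Double] -> [Double]
+relu6Ref = map (min 6 . max 0)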
    +
    +-- | Resize `images` to `size` using bicubic interpolation.
    +--
    +-- Input images can be of different types but output images are always float.
    +resizeBicubic :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                         Data.Int.Int32,
    +                                                         Data.Int.Int64,
    +                                                         Data.Int.Int8,
    +                                                         Data.Word.Word16,
    +                                                         Data.Word.Word8,
    +                                                         Double, Float] t) =>
    +                 Tensor v1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
+                 -> Tensor v2 Data.Int.Int32 -- ^ __size__: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
    +                                             -- new size for the images.
    +                 -> Tensor Value Float -- ^ __resized_images__: 4-D with shape
    +                 -- `[batch, new_height, new_width, channels]`.
    +resizeBicubic images size | eqLengthGuard [] =
    +    buildOp (opDef "ResizeBicubic"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        images size
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
    +  name: "align_corners"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "images"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
    +  name: "size"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
    +  name: "resized_images"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Computes rectified linear 6 gradients for a Relu6 operation.
    +
    +relu6Grad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                     Data.Int.Int32,
    +                                                     Data.Int.Int64,
    +                                                     Data.Int.Int8,
    +                                                     Data.Word.Word16,
    +                                                     Data.Word.Word8, Double,
    +                                                     Float] t) =>
    +             Tensor v1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu6 operation.
    +             -> Tensor v2 t -- ^ __features__: The features passed as input to the corresponding Relu6 operation.
    +             -> Tensor Value t -- ^ __backprops__: The gradients:
    +             -- `gradients * features * (features > 0) * (features < 6)`.
    +relu6Grad gradients features | eqLengthGuard [] =
    +    buildOp (opDef "Relu6Grad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        gradients features
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The backpropagated gradients to the corresponding Relu6 operation."
    +  name: "gradients"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The features passed as input to the corresponding Relu6 operation."
    +  name: "features"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The gradients:\n`gradients * features * (features > 0) * (features < 6)`."
    +  name: "backprops"
    +  type_attr: "T"
    +}
    +-}
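+
+-- Pure sketch (not a generated op) of the conventional relu6 gradient: the
+-- incoming gradient passes through only where `0 < features < 6`. Note the
+-- description above carries an extra `features` factor from the op
+-- definition that this sketch does not reproduce.
+relu6GradRef :: [Double] -> [Double] -> [Double]
+relu6GradRef = zipWith (\g f -> if f > 0 && f < 6 then g else 0)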
    +
    +-- | Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
    +--
    +-- No validity checking is performed on the indices of A.  However, the following
    +-- input format is recommended for optimal behavior:
    +-- 
    +-- if adjoint_a == false:
    +--   A should be sorted in lexicographically increasing order.  Use SparseReorder
    +--   if you're not sure.
    +-- if adjoint_a == true:
    +--   A should be sorted in order of increasing dimension 1 (i.e., "column major"
    +--   order instead of "row major" order).
    +sparseTensorDenseMatMul :: forall v1 v2 v3 v4 t . (TensorType t) =>
    +                           Tensor v1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
    +                           -> Tensor v2 t -- ^ __a_values__: 1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.
    +                           -> Tensor v3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.
    +                           -> Tensor v4 t -- ^ __b__: 2-D.  A dense Matrix.
    +                           -> Tensor Value t -- ^ __product__
    +sparseTensorDenseMatMul a_indices a_values a_shape b | eqLengthGuard [] =
    +    buildOp (opDef "SparseTensorDenseMatMul"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        a_indices a_values a_shape b
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  default_value { b: false }
    +  description: "Use the adjoint of A in the matrix multiply.  If A is complex, this\nis transpose(conj(A)).  Otherwise it\'s transpose(A)."
    +  name: "adjoint_a"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "Use the adjoint of B in the matrix multiply.  If B is complex, this\nis transpose(conj(B)).  Otherwise it\'s transpose(B)."
    +  name: "adjoint_b"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix."
    +  name: "a_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector."
    +  name: "a_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector."
    +  name: "a_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "2-D.  A dense Matrix." name: "b" type_attr: "T"
    +}
    +output_arg { name: "product" type_attr: "T" }
    +-}
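+
+-- Pure sketch (not a generated op) of the rank-2 case with
+-- `adjoint_a = adjoint_b = false`: `result[i][k]` sums `v * b[j][k]` over the
+-- nonzero entries `((i, j), v)` of A. Assumes `b` is non-empty and
+-- rectangular; no validity checking is performed, matching the note above.
+spDenseMatMulRef :: Int -> [((Int, Int), Double)] -> [[Double]] -> [[Double]]
+spDenseMatMulRef nRows aEntries b =
+    [ [ sum [ v * (b !! j !! k) | ((i', j), v) <- aEntries, i' == i ]
+      | k <- [0 .. length (head b) - 1] ]
+    | i <- [0 .. nRows - 1] ]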
    +
    +-- | Computes softplus: `log(exp(features) + 1)`.
    +
    +softplus :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
    +                                                 Data.Int.Int64, Data.Int.Int8,
    +                                                 Data.Word.Word16,
    +                                                 Data.Word.Word8, Double,
    +                                                 Float] t) =>
    +            Tensor v1 t -- ^ __features__
    +            -> Tensor Value t -- ^ __activations__
    +softplus features | eqLengthGuard [] =
    +    buildOp (opDef "Softplus"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        features
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "features" type_attr: "T" }
    +output_arg { name: "activations" type_attr: "T" }
    +-}
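+
+-- Pure reference (not a generated op) for `log(exp(features) + 1)`:
+softplusRef :: [Double] -> [Double]
+softplusRef = map (\x -> log (exp x + 1))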
    +
    +-- | Multiplies slices of two tensors in batches.
    +--
    +-- Multiplies all slices of `Tensor` `x` and `y` (each slice can be
    +-- viewed as an element of a batch), and arranges the individual results
    +-- in a single output tensor of the same batch size. Each of the
    +-- individual slices can optionally be adjointed (to adjoint a matrix
    +-- means to transpose and conjugate it) before multiplication by setting
+-- the `adj_x` or `adj_y` flag to `True`; both default to `False`.
    +-- 
    +-- The input tensors `x` and `y` are 3-D or higher with shape `[..., r_x, c_x]`
    +-- and `[..., r_y, c_y]`.
    +-- 
    +-- The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, where:
    +-- 
    +--     r_o = c_x if adj_x else r_x
    +--     c_o = r_y if adj_y else c_y
    +-- 
    +-- It is computed as:
    +-- 
    +--     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
    +batchMatMul :: forall v1 v2 t . (TensorType t,
    +                                 OneOf '[(Data.Complex.Complex Double),
    +                                         (Data.Complex.Complex Float),
    +                                         Data.Int.Int32, Data.Word.Word16,
    +                                         Double, Float] t) =>
    +               Tensor v1 t -- ^ __x__: 3-D or higher with shape `[..., r_x, c_x]`.
    +               -> Tensor v2 t -- ^ __y__: 3-D or higher with shape `[..., r_y, c_y]`.
    +               -> Tensor Value t -- ^ __output__: 3-D or higher with shape `[..., r_o, c_o]`
    +batchMatMul x y | eqLengthGuard [] =
    +    buildOp (opDef "BatchMatMul"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, adjoint the slices of `x`. Defaults to `False`."
    +  name: "adj_x"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, adjoint the slices of `y`. Defaults to `False`."
    +  name: "adj_y"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "3-D or higher with shape `[..., r_x, c_x]`."
    +  name: "x"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "3-D or higher with shape `[..., r_y, c_y]`."
    +  name: "y"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "3-D or higher with shape `[..., r_o, c_o]`"
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
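+
+-- Pure sketch (not a generated op) of the output-shape rule quoted above,
+-- for one slice's `(rows, cols)`:
+batchMatMulDims :: Bool -> Bool -> (Int, Int) -> (Int, Int) -> (Int, Int)
+batchMatMulDims adjX adjY (rX, cX) (rY, cY) =
+    (if adjX then cX else rX, if adjY then rY else cY)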
    +
    +-- | Computes softsign gradients for a softsign operation.
    +
    +softsignGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                        Data.Int.Int32,
    +                                                        Data.Int.Int64,
    +                                                        Data.Int.Int8,
    +                                                        Data.Word.Word16,
    +                                                        Data.Word.Word8, Double,
    +                                                        Float] t) =>
    +                Tensor v1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softsign operation.
    +                -> Tensor v2 t -- ^ __features__: The features passed as input to the corresponding softsign operation.
    +                -> Tensor Value t -- ^ __backprops__: The gradients: `gradients / (1 + abs(-features)) ** 2`.
    +softsignGrad gradients features | eqLengthGuard [] =
    +    buildOp (opDef "SoftsignGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        gradients features
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The backpropagated gradients to the corresponding softsign operation."
    +  name: "gradients"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The features passed as input to the corresponding softsign operation."
    +  name: "features"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The gradients: `gradients / (1 + abs(-features)) ** 2`."
    +  name: "backprops"
    +  type_attr: "T"
    +}
    +-}
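+
+-- Pure reference (not a generated op) for
+-- `gradients / (1 + abs(-features)) ** 2` (note `abs (-f) == abs f`):
+softsignGradRef :: [Double] -> [Double] -> [Double]
+softsignGradRef = zipWith (\g f -> g / (1 + abs f) ^ (2 :: Int))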
    +
    +-- | Returns the truth value of (x <= y) element-wise.
    +--
    +-- *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +lessEqual :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                     Data.Int.Int32,
    +                                                     Data.Int.Int64,
    +                                                     Data.Int.Int8,
    +                                                     Data.Word.Word16,
    +                                                     Data.Word.Word8, Double,
    +                                                     Float] t) =>
    +             Tensor v1 t -- ^ __x__
    +             -> Tensor v2 t -- ^ __y__
    +             -> Tensor Value Bool -- ^ __z__
    +lessEqual x y | eqLengthGuard [] =
    +    buildOp (opDef "LessEqual"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type: DT_BOOL }
    +-}
    +
    +-- | Computes log softmax activations.
    +--
    +-- For each batch `i` and class `j` we have
    +-- 
    +--     logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
    +logSoftmax :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                                   Float] t) =>
    +              Tensor v1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`.
    +              -> Tensor Value t -- ^ __logsoftmax__: Same shape as `logits`.
    +logSoftmax logits | eqLengthGuard [] =
    +    buildOp (opDef "LogSoftmax"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        logits
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D with shape `[batch_size, num_classes]`."
    +  name: "logits"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Same shape as `logits`."
    +  name: "logsoftmax"
    +  type_attr: "T"
    +}
    +-}
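+
+-- Pure reference (not a generated op) for one batch row of the formula
+-- above: subtract `log(sum(exp(logits[i])))` from every logit.
+logSoftmaxRow :: [Double] -> [Double]
+logSoftmaxRow logits = map (subtract logZ) logits
+  where logZ = log (sum (map exp logits))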
    +
    +-- | Says whether the targets are in the top `K` predictions.
    +--
    +-- This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
    +-- prediction for the target class is among the top `k` predictions among
    +-- all predictions for example `i`. Note that the behavior of `InTopK` differs
    +-- from the `TopK` op in its handling of ties; if multiple classes have the
    +-- same prediction value and straddle the top-`k` boundary, all of those
    +-- classes are considered to be in the top `k`.
    +-- 
    +-- More formally, let
    +-- 
    +--   \\(predictions_i\\) be the predictions for all classes for example `i`,
    +--   \\(targets_i\\) be the target class for example `i`,
    +--   \\(out_i\\) be the output for example `i`,
    +-- 
    +-- $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
    +inTopK :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32,
    +                                                  Data.Int.Int64] t) =>
    +          Data.Int.Int64 -- ^ __k__: Number of top elements to look at for computing precision.
    +          -> Tensor v1 Float -- ^ __predictions__: A `batch_size` x `classes` tensor.
    +          -> Tensor v2 t -- ^ __targets__: A `batch_size` vector of class ids.
    +          -> Tensor Value Bool -- ^ __precision__: Computed Precision at `k` as a `bool Tensor`.
    +inTopK k predictions targets | eqLengthGuard [] =
    +    buildOp (opDef "InTopK"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "k" .~ k)
    +        predictions targets
    +{-
    +attr {
    +  description: "Number of top elements to look at for computing precision."
    +  name: "k"
    +  type: "int"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "A `batch_size` x `classes` tensor."
    +  name: "predictions"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A `batch_size` vector of class ids."
    +  name: "targets"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Computed Precision at `k` as a `bool Tensor`."
    +  name: "precision"
    +  type: DT_BOOL
    +}
    +-}
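    +
    +-- A pure sketch of the ties-inclusive test described above for a single
    +-- example (`inTopKRef` is a hypothetical helper, not part of this module): a
    +-- target is in the top `k` iff fewer than `k` classes score strictly higher.
    +--
    +-- ```prettyprint
    +-- -- reference sketch only, not part of the generated API
    +-- inTopKRef :: Int -> [Float] -> Int -> Bool
    +-- inTopKRef k predictions target =
    +--     length (filter (> predictions !! target) predictions) < k
    +-- ```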
    +
    +-- | Returns a batched diagonal tensor with given batched diagonal values.
    +--
    +-- Given a `diagonal`, this operation returns a tensor with the `diagonal` and
    +-- everything else padded with zeros. The diagonal is computed as follows:
    +-- 
    +-- Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
    +-- tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
    +-- 
    +-- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
    +-- 
    +-- and diagonal.shape = (2, 4)
    +-- 
    +-- tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
    +--                                      [0, 2, 0, 0]
    +--                                      [0, 0, 3, 0]
    +--                                      [0, 0, 0, 4]],
    +--                                     [[5, 0, 0, 0]
    +--                                      [0, 6, 0, 0]
    +--                                      [0, 0, 7, 0]
    +--                                      [0, 0, 0, 8]]]
    +-- 
    +-- which has shape (2, 4, 4)
    +-- ```
    +matrixDiag :: forall v1 t . (TensorType t) =>
    +              Tensor v1 t -- ^ __diagonal__: Rank `k`, where `k >= 1`.
    +              -> Tensor Value t -- ^ __output__: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
    +matrixDiag diagonal | eqLengthGuard [] =
    +    buildOp (opDef "MatrixDiag"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        diagonal
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "Rank `k`, where `k >= 1`."
    +  name: "diagonal"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
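    +
    +-- A pure sketch of the `1{m=n} * diagonal[..., n]` formula above for a single
    +-- (unbatched) diagonal (`matrixDiagRef` is a hypothetical helper, not part of
    +-- this module):
    +--
    +-- ```prettyprint
    +-- -- reference sketch only, not part of the generated API
    +-- matrixDiagRef :: Num a => [a] -> [[a]]
    +-- matrixDiagRef d = [ [ if m == n then x else 0 | (n, x) <- zip [0 ..] d ]
    +--                   | (m, _) <- zip [0 :: Int ..] d ]
    +--
    +-- -- matrixDiagRef [1, 2, 3] == [[1,0,0],[0,2,0],[0,0,3]]
    +-- ```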
    +
    +-- | Performs 3D max pooling on the input.
    +
    +maxPool3D :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                                  (Data.Complex.Complex Float),
    +                                                  Data.Int.Int16,
    +                                                  Data.Int.Int32,
    +                                                  Data.Int.Int64, Data.Int.Int8,
    +                                                  Data.Word.Word16,
    +                                                  Data.Word.Word8, Double,
    +                                                  Float] t) =>
    +             Tensor v1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
    +             -> Tensor Value t -- ^ __output__: The max pooled output tensor.
    +maxPool3D input | eqLengthGuard [] =
    +    buildOp (opDef "MaxPool3D"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "ksize"
    +  type: "list(int)"
    +}
    +attr {
    +  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    +  has_minimum: true
    +  minimum: 5
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The max pooled output tensor."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Finds values and indices of the `k` largest elements for the last dimension.
    +--
    +-- If the input is a vector (rank-1), finds the `k` largest entries in the vector
    +-- and outputs their values and indices as vectors.  Thus `values[j]` is the
    +-- `j`-th largest entry in `input`, and its index is `indices[j]`.
    +-- 
    +-- For matrices (resp. higher rank input), computes the top `k` entries in each
    +-- row (resp. vector along the last dimension).  Thus,
    +-- 
    +--     values.shape = indices.shape = input.shape[:-1] + [k]
    +-- 
    +-- If two elements are equal, the lower-index element appears first.
    +-- 
    +-- If `k` varies dynamically, use `TopKV2` below.
    +topK :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
    +                                             Data.Int.Int64, Data.Int.Int8,
    +                                             Data.Word.Word16, Data.Word.Word8,
    +                                             Double, Float] t) =>
    +        Data.Int.Int64 -- ^ __k__: Number of top elements to look for along the last dimension (along each
    +                       -- row for matrices).
    +        -> Tensor v1 t -- ^ __input__: 1-D or higher with last dimension at least `k`.
    +        -> (Tensor Value t, Tensor Value Data.Int.Int32)
    +        -- ^ (__values__, __indices__)
    +        --
    +        -- * __values__: The `k` largest elements along each last dimensional slice.
    +        --
    +        -- * __indices__: The indices of `values` within the last dimension of `input`.
    +topK k input | eqLengthGuard [] =
    +    buildOp (opDef "TopK"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "k" .~ k)
    +        input
    +{-
    +attr {
    +  description: "Number of top elements to look for along the last dimension (along each\nrow for matrices)."
    +  has_minimum: true
    +  name: "k"
    +  type: "int"
    +}
    +attr {
    +  default_value { b: true }
    +  description: "If true the resulting `k` elements will be sorted by the values in\ndescending order."
    +  name: "sorted"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "1-D or higher with last dimension at least `k`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The `k` largest elements along each last dimensional slice."
    +  name: "values"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The indices of `values` within the last dimension of `input`."
    +  name: "indices"
    +  type: DT_INT32
    +}
    +-}
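    +
    +-- A pure sketch of the semantics above, including the "lower index first on
    +-- ties" rule, which falls out of `sortBy` being stable (`topKRef` is a
    +-- hypothetical helper, not part of this module):
    +--
    +-- ```prettyprint
    +-- -- reference sketch only, not part of the generated API
    +-- import Data.List (sortBy)
    +-- import Data.Ord (Down (..), comparing)
    +--
    +-- topKRef :: Ord a => Int -> [a] -> ([a], [Int])
    +-- topKRef k xs =
    +--     unzip (take k (sortBy (comparing (Down . fst)) (zip xs [0 ..])))
    +-- ```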
    +
    +-- | Finds values and indices of the `k` largest elements for the last dimension.
    +--
    +-- If the input is a vector (rank-1), finds the `k` largest entries in the vector
    +-- and outputs their values and indices as vectors.  Thus `values[j]` is the
    +-- `j`-th largest entry in `input`, and its index is `indices[j]`.
    +-- 
    +-- For matrices (resp. higher rank input), computes the top `k` entries in each
    +-- row (resp. vector along the last dimension).  Thus,
    +-- 
    +--     values.shape = indices.shape = input.shape[:-1] + [k]
    +-- 
    +-- If two elements are equal, the lower-index element appears first.
    +-- 
    +-- This is the same as `TopK`, but takes `k` as an input rather than an attr.
    +topKV2 :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                  Data.Int.Int32,
    +                                                  Data.Int.Int64, Data.Int.Int8,
    +                                                  Data.Word.Word16,
    +                                                  Data.Word.Word8, Double,
    +                                                  Float] t) =>
    +          Tensor v1 t -- ^ __input__: 1-D or higher with last dimension at least `k`.
    +          -> Tensor v2 Data.Int.Int32 -- ^ __k__: 0-D.  Number of top elements to look for along the last dimension (along each
    +                                      -- row for matrices).
    +          -> (Tensor Value t, Tensor Value Data.Int.Int32)
    +          -- ^ (__values__, __indices__)
    +          --
    +          -- * __values__: The `k` largest elements along each last dimensional slice.
    +          --
    +          -- * __indices__: The indices of `values` within the last dimension of `input`.
    +topKV2 input k | eqLengthGuard [] =
    +    buildOp (opDef "TopKV2"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input k
    +{-
    +attr {
    +  default_value { b: true }
    +  description: "If true the resulting `k` elements will be sorted by the values in\ndescending order."
    +  name: "sorted"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "1-D or higher with last dimension at least `k`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "0-D.  Number of top elements to look for along the last dimension (along each\nrow for matrices)."
    +  name: "k"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "The `k` largest elements along each last dimensional slice."
    +  name: "values"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "The indices of `values` within the last dimension of `input`."
    +  name: "indices"
    +  type: DT_INT32
    +}
    +-}
    +
    +-- | Performs fractional max pooling on the input.
    +--
    +-- Fractional max pooling is slightly different from regular max pooling.  In
    +-- regular max pooling, you downsize an input set by taking the maximum value of
    +-- smaller N x N subsections of the set (often 2x2), and try to reduce the set by
    +-- a factor of N, where N is an integer.  Fractional max pooling, as you might
    +-- expect from the word "fractional", means that the overall reduction ratio N
    +-- does not have to be an integer.
    +-- 
    +-- The sizes of the pooling regions are generated randomly but are fairly uniform.
    +-- For example, let's look at the height dimension, and the constraints on the
    +-- list of rows that will be pool boundaries.
    +-- 
    +-- First we define the following:
    +-- 
    +-- 1.  input_row_length : the number of rows from the input set
    +-- 2.  output_row_length : which will be smaller than the input
    +-- 3.  alpha = input_row_length / output_row_length : our reduction ratio
    +-- 4.  K = floor(alpha)
    +-- 5.  row_pooling_sequence : this is the result list of pool boundary rows
    +-- 
    +-- Then, row_pooling_sequence should satisfy:
    +-- 
    +-- 1.  a[0] = 0 : the first value of the sequence is 0
    +-- 2.  a[end] = input_row_length : the last value of the sequence is the size
    +-- 3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
    +-- 4.  length(row_pooling_sequence) = output_row_length+1
    +-- 
    +-- For more details on fractional max pooling, see this paper:
    +-- [Benjamin Graham, Fractional Max-Pooling]
    +-- (http://arxiv.org/abs/1412.6071)
    +fractionalMaxPool :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int32,
    +                                                          Data.Int.Int64,
    +                                                          Double, Float] t) =>
    +                     Tensor v1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
    +                     -> (Tensor Value t, Tensor Value Data.Int.Int64,
    +                         Tensor Value Data.Int.Int64)
    +                     -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__)
    +                     --
    +                     -- * __output__: output tensor after fractional max pooling.
    +                     --
    +                     -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient.
    +                     --
    +                     -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient.
    +fractionalMaxPool value | eqLengthGuard [] =
    +    buildOp (opDef "FractionalMaxPool"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        value
    +{-
    +attr {
    +  description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively."
    +  has_minimum: true
    +  minimum: 4
    +  name: "pooling_ratio"
    +  type: "list(float)"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling] (http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random."
    +  name: "pseudo_random"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling."
    +  name: "overlapping"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalMaxPool node in the computation graph. Mainly used\nin unit test to make FractionalMaxPool deterministic."
    +  name: "deterministic"
    +  type: "bool"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "An second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "output tensor after fractional max pooling."
    +  name: "output"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "row pooling sequence, needed to calculate gradient."
    +  name: "row_pooling_sequence"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "column pooling sequence, needed to calculate gradient."
    +  name: "col_pooling_sequence"
    +  type: DT_INT64
    +}
    +-}
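    +
    +-- The four constraints on `row_pooling_sequence` listed above can be checked
    +-- mechanically; a minimal sketch (`validPoolingSequence` is a hypothetical
    +-- helper, not part of this module):
    +--
    +-- ```prettyprint
    +-- -- reference sketch only, not part of the generated API
    +-- validPoolingSequence :: Int -> Int -> [Int] -> Bool
    +-- validPoolingSequence inputLen outputLen a =
    +--        head a == 0
    +--     && last a == inputLen
    +--     && length a == outputLen + 1
    +--     && all sized (zipWith (-) (tail a) a)
    +--   where
    +--     k = inputLen `div` outputLen        -- K = floor(alpha)
    +--     sized d = d == k || d == k + 1      -- all intervals are K or K+1
    +-- ```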
    +
    +-- | Copy a tensor, setting everything outside a central band in each innermost
    +-- matrix to zero.
    +-- 
    +-- The `band` part is computed as follows:
    +-- Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
    +-- tensor with the same shape where
    +-- 
    +-- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
    +-- 
    +-- The indicator function `in_band(m, n)` is one if
    +-- `(num_lower < 0 || (m-n) <= num_lower) &&
    +-- (num_upper < 0 || (n-m) <= num_upper)`, and zero otherwise.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # if 'input' is [[ 0,  1,  2, 3]
    +--                  [-1,  0,  1, 2]
    +--                  [-2, -1,  0, 1]
    +--                  [-3, -2, -1, 0]],
    +-- 
    +-- tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
    +--                                              [-1,  0,  1, 2]
    +--                                              [ 0, -1,  0, 1]
    +--                                              [ 0,  0, -1, 0]],
    +-- 
    +-- tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
    +--                                             [-1,  0,  1, 0]
    +--                                             [-2, -1,  0, 1]
    +--                                             [ 0, -2, -1, 0]]
    +-- ```
    +-- 
    +-- Useful special cases:
    +-- 
    +-- ```prettyprint
    +--  tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
    +--  tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
    +--  tf.matrix_band_part(input, 0, 0) ==> Diagonal.
    +-- ```
    +matrixBandPart :: forall v1 v2 v3 t . (TensorType t) =>
    +                  Tensor v1 t -- ^ __input__: Rank `k` tensor.
    +                  -> Tensor v2 Data.Int.Int64 -- ^ __num_lower__: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
    +                                              -- lower triangle.
    +                  -> Tensor v3 Data.Int.Int64 -- ^ __num_upper__: 0-D tensor. Number of superdiagonals to keep. If negative, keep
    +                                              -- entire upper triangle.
    +                  -> Tensor Value t -- ^ __band__: Rank `k` tensor of the same shape as input. The extracted banded tensor.
    +matrixBandPart input num_lower num_upper | eqLengthGuard [] =
    +    buildOp (opDef "MatrixBandPart"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input num_lower num_upper
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "Rank `k` tensor." name: "input" type_attr: "T"
    +}
    +input_arg {
    +  description: "0-D tensor. Number of subdiagonals to keep. If negative, keep entire\nlower triangle."
    +  name: "num_lower"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "0-D tensor. Number of superdiagonals to keep. If negative, keep\nentire upper triangle."
    +  name: "num_upper"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "Rank `k` tensor of the same shape as input. The extracted banded tensor."
    +  name: "band"
    +  type_attr: "T"
    +}
    +-}
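    +
    +-- A pure sketch of the `in_band` indicator above applied to one innermost
    +-- matrix (`bandPartRef` is a hypothetical helper, not part of this module):
    +--
    +-- ```prettyprint
    +-- -- reference sketch only, not part of the generated API
    +-- bandPartRef :: Num a => Int -> Int -> [[a]] -> [[a]]
    +-- bandPartRef numLower numUpper rows =
    +--     [ [ if inBand m n then x else 0 | (n, x) <- zip [0 ..] row ]
    +--     | (m, row) <- zip [0 :: Int ..] rows ]
    +--   where
    +--     inBand m n = (numLower < 0 || m - n <= numLower)
    +--               && (numUpper < 0 || n - m <= numUpper)
    +-- ```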
    +
    +-- | Reinterpret the bytes of a string as a vector of numbers.
    +
    +decodeRaw :: forall v1 out_type . (TensorType out_type, OneOf '[Data.Int.Int16,
    +                                                                Data.Int.Int32,
    +                                                                Data.Int.Int64,
    +                                                                Data.Int.Int8,
    +                                                                Data.Word.Word8,
    +                                                                Double,
    +                                                                Float] out_type) =>
    +             Tensor v1 Data.ByteString.ByteString -- ^ __bytes__: All the elements must have the same length.
    +             -> Tensor Value out_type -- ^ __output__: A Tensor with one more dimension than the input `bytes`.  The
    +             -- added dimension will have size equal to the length of the elements
    +             -- of `bytes` divided by the number of bytes to represent `out_type`.
    +decodeRaw bytes | eqLengthGuard [] =
    +    buildOp (opDef "DecodeRaw"
    +             & opAttr "out_type" .~ tensorType (undefined :: out_type))
    +        bytes
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_INT64
    +    }
    +  }
    +  name: "out_type"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: true }
    +  description: "Whether the input `bytes` are in little-endian order.\nIgnored for `out_type` values that are stored in a single byte like\n`uint8`."
    +  name: "little_endian"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "All the elements must have the same length."
    +  name: "bytes"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "A Tensor with one more dimension than the input `bytes`.  The\nadded dimension will have size equal to the length of the elements\nof `bytes` divided by the number of bytes to represent `out_type`."
    +  name: "output"
    +  type_attr: "out_type"
    +}
    +-}
    +
    +-- | Convert JSON-encoded Example records to binary protocol buffer strings.
    +--
    +-- This op translates a tensor containing Example records, encoded using
    +-- the [standard JSON
    +-- mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),
    +-- into a tensor containing the same records encoded as binary protocol
    +-- buffers. The resulting tensor can then be fed to any of the other
    +-- Example-parsing ops.
    +decodeJSONExample :: Tensor v1 Data.ByteString.ByteString -- ^ __json_examples__: Each string is a JSON object serialized according to the JSON
    +                                                          -- mapping of the Example proto.
    +                     -> Tensor Value Data.ByteString.ByteString -- ^ __binary_examples__: Each string is a binary Example protocol buffer corresponding
    +                     -- to the respective element of `json_examples`.
    +decodeJSONExample json_examples | eqLengthGuard [] =
    +    buildOp (opDef "DecodeJSONExample")
    +        json_examples
    +{-
    +input_arg {
    +  description: "Each string is a JSON object serialized according to the JSON\nmapping of the Example proto."
    +  name: "json_examples"
    +  type: DT_STRING
    +}
    +output_arg {
    +  description: "Each string is a binary Example protocol buffer corresponding\nto the respective element of `json_examples`."
    +  name: "binary_examples"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Outputs random values from a truncated normal distribution.
    +--
    +-- The generated values follow a normal distribution with mean 0 and standard
    +-- deviation 1, except that values whose magnitude is more than 2 standard
    +-- deviations from the mean are dropped and re-picked.
    +truncatedNormal :: forall v1 t dtype . (TensorType t, OneOf '[Data.Int.Int32,
    +                                                              Data.Int.Int64] t,
    +                                        TensorType dtype,
    +                                        OneOf '[Data.Word.Word16, Double,
    +                                                Float] dtype) =>
    +                   Tensor v1 t -- ^ __shape__: The shape of the output tensor.
    +                   -> Tensor Value dtype -- ^ __output__: A tensor of the specified shape filled with random truncated normal
    +                   -- values.
    +truncatedNormal shape | eqLengthGuard [] =
    +    buildOp (opDef "TruncatedNormal"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        shape
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  description: "The type of the output."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The shape of the output tensor."
    +  name: "shape"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "A tensor of the specified shape filled with random truncated normal\nvalues."
    +  name: "output"
    +  type_attr: "dtype"
    +}
    +-}
    +
    +-- | Randomly shuffles a tensor along its first dimension.
    +--
    +--   The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
    +--   to one and only one `output[i]`. For example, a mapping that might occur for a
    +--   3x2 tensor is:
    +-- 
    +-- ```prettyprint
    +-- [[1, 2],       [[5, 6],
    +--  [3, 4],  ==>   [1, 2],
    +--  [5, 6]]        [3, 4]]
    +-- ```
    +randomShuffle :: forall v1 t . (TensorType t) =>
    +                 Tensor v1 t -- ^ __value__: The tensor to be shuffled.
    +                 -> Tensor Value t -- ^ __output__: A tensor of same shape and type as `value`, shuffled along its first
    +                 -- dimension.
    +randomShuffle value | eqLengthGuard [] =
    +    buildOp (opDef "RandomShuffle"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        value
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "The tensor to be shuffled."
    +  name: "value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "A tensor of same shape and type as `value`, shuffled along its first\ndimension."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Draws samples from a multinomial distribution.
    +
    +multinomial :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                       Data.Int.Int32,
    +                                                       Data.Int.Int64,
    +                                                       Data.Int.Int8,
    +                                                       Data.Word.Word16,
    +                                                       Data.Word.Word8, Double,
    +                                                       Float] t) =>
    +               Tensor v1 t -- ^ __logits__: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
    +                           -- represents the unnormalized log probabilities for all classes.
    +               -> Tensor v2 Data.Int.Int32 -- ^ __num_samples__: 0-D.  Number of independent samples to draw for each row slice.
    +               -> Tensor Value Data.Int.Int64 -- ^ __output__: 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
    +               -- contains the drawn class labels with range `[0, num_classes)`.
    +multinomial logits num_samples | eqLengthGuard [] =
    +    buildOp (opDef "Multinomial"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        logits num_samples
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 is set to be non-zero, the internal random number\ngenerator is seeded by the given seed.  Otherwise, a random seed is used."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`\nrepresents the unnormalized log probabilities for all classes."
    +  name: "logits"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "0-D.  Number of independent samples to draw for each row slice."
    +  name: "num_samples"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`\ncontains the drawn class labels with range `[0, num_classes)`."
    +  name: "output"
    +  type: DT_INT64
    +}
    +-}
    +
    +-- | Outputs random values from the Gamma distribution(s) described by alpha.
    +--
    +-- This op uses the algorithm by Marsaglia et al. to acquire samples via
    +-- transformation-rejection from pairs of uniform and normal random variables.
    +-- See http://dl.acm.org/citation.cfm?id=358414
    +randomGamma :: forall v1 v2 s t . (TensorType s, OneOf '[Data.Int.Int32,
    +                                                         Data.Int.Int64] s,
    +                                   TensorType t, OneOf '[Data.Word.Word16,
    +                                                         Double, Float] t) =>
    +               Tensor v1 s -- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each
    +                           -- distribution described by the shape parameters given in alpha.
    +               -> Tensor v2 t -- ^ __alpha__: A tensor in which each scalar is a "shape" parameter describing the
    +                              -- associated gamma distribution.
    +               -> Tensor Value t -- ^ __output__: A tensor with shape `shape + shape(alpha)`. Each slice
    +               -- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
    +               -- `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
    +randomGamma shape alpha | eqLengthGuard [] =
    +    buildOp (opDef "RandomGamma"
    +             & opAttr "S" .~ tensorType (undefined :: s)
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        shape alpha
    +{-
    +attr {
    +  default_value { i: 0 }
    +  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "A second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "S"
    +  type: "type"
    +}
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in alpha."
    +  name: "shape"
    +  type_attr: "S"
    +}
    +input_arg {
    +  description: "A tensor in which each scalar is a \"shape\" parameter describing the\nassociated gamma distribution."
    +  name: "alpha"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "A tensor with shape `shape + shape(alpha)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Add all input tensors element-wise.
    +
    +addN :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                             (Data.Complex.Complex Float),
    +                                             Data.Int.Int16, Data.Int.Int32,
    +                                             Data.Int.Int64, Data.Int.Int8,
    +                                             Data.Word.Word16, Data.Word.Word8,
    +                                             Double, Float] t) =>
    +        [Tensor v1 t] -- ^ __inputs__: Must all be the same size and shape.
    +        -> Tensor Value t -- ^ __sum__
    +addN inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
    +    buildOp (opDef "AddN"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "N" .~ (fromIntegral (length inputs) :: Int64))
    +        inputs
    +{-
    +attr { has_minimum: true minimum: 1 name: "N" type: "int" }
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Must all be the same size and shape."
    +  name: "inputs"
    +  number_attr: "N"
    +  type_attr: "T"
    +}
    +output_arg { name: "sum" type_attr: "T" }
    +-}
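    +
    +-- Note that the wrapper above derives the `N` attr from the length of the
    +-- input list. A pure sketch of the element-wise sum over same-shape inputs
    +-- (`addNRef` is a hypothetical helper, not part of this module):
    +--
    +-- ```prettyprint
    +-- -- reference sketch only, not part of the generated API
    +-- addNRef :: Num a => [[a]] -> [a]
    +-- addNRef = foldr1 (zipWith (+))
    +--
    +-- -- addNRef [[1, 2], [3, 4], [5, 6 :: Int]] == [9, 12]
    +-- ```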
    +
    +-- | Computes the maximum of elements across dimensions of a tensor.
    +--
    +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
    +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
    +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
    +-- retained with length 1.
    +max :: forall v1 v2 t tidx . (TensorType t,
    +                              OneOf '[(Data.Complex.Complex Double),
    +                                      (Data.Complex.Complex Float),
    +                                      Data.Int.Int16, Data.Int.Int32,
    +                                      Data.Int.Int64, Data.Int.Int8,
    +                                      Data.Word.Word16, Data.Word.Word8, Double,
    +                                      Float] t, TensorType tidx,
    +                              OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
    +       Tensor v1 t -- ^ __input__: The tensor to reduce.
    +       -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
    +       -> Tensor Value t -- ^ __output__: The reduced tensor.
    +max input reduction_indices | eqLengthGuard [] =
    +    buildOp (opDef "Max"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        input reduction_indices
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, retain reduced dimensions with length 1."
    +  name: "keep_dims"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The tensor to reduce." name: "input" type_attr: "T"
    +}
    +input_arg {
    +  description: "The dimensions to reduce."
    +  name: "reduction_indices"
    +  type_attr: "Tidx"
    +}
    +output_arg {
    +  description: "The reduced tensor." name: "output" type_attr: "T"
    +}
    +-}
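    +
    +-- A pure sketch of reducing a 2-D tensor along dimension 1 with
    +-- `keep_dims = false`, i.e. a per-row maximum (`maxRowsRef` is a hypothetical
    +-- helper, not part of this module):
    +--
    +-- ```prettyprint
    +-- -- reference sketch only, not part of the generated API
    +-- maxRowsRef :: Ord a => [[a]] -> [a]
    +-- maxRowsRef = map maximum
    +--
    +-- -- maxRowsRef [[1, 5], [4, 2 :: Int]] == [5, 4]
    +-- ```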
    +
    +-- | A graph node which represents a return value of a function.
    +
    +_Retval :: forall v1 t . (TensorType t) =>
    +           Data.Int.Int64 -- ^ __index__: This return value is the index-th return value of the function.
    +           -> Tensor v1 t -- ^ __input__: The return value.
    +           -> ControlNode
    +_Retval index input | eqLengthGuard [] =
    +    buildOp (opDef "_Retval"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "index" .~ index)
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "This return value is the index-th return value of the function."
    +  has_minimum: true
    +  name: "index"
    +  type: "int"
    +}
    +input_arg {
    +  description: "The return value." name: "input" type_attr: "T"
    +}
    +-}
    +
    +-- | Destroys the temporary variable and returns its final value.
    +--
    +-- Sets output to the value of the Tensor pointed to by 'ref', then destroys
    +-- the temporary variable called 'var_name'.
    +-- All other uses of 'ref' *must* have executed before this op.
    +-- This is typically achieved by chaining the ref through each assign op, or by
    +-- using control dependencies.
    +-- 
    +-- Outputs the final value of the tensor pointed to by 'ref'.
    +destroyTemporaryVariable :: forall v1 t . (TensorType t) =>
    +                            Tensor v1 t -- ^ __ref__: A reference to the temporary variable tensor.
    +                            -> Tensor Value t -- ^ __value__
    +destroyTemporaryVariable ref | eqLengthGuard [] =
    +    buildOp (opDef "DestroyTemporaryVariable"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        ref
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "Name of the temporary variable, usually the name of the matching\n\'TemporaryVariable\' op."
    +  name: "var_name"
    +  type: "string"
    +}
    +input_arg {
    +  description: "A reference to the temporary variable tensor."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "T"
    +}
    +output_arg { name: "value" type_attr: "T" }
    +-}
    +
    +-- | Cast x of type SrcT to y of DstT.
    +
    +cast :: forall v1 dstT srcT . (TensorType dstT, TensorType srcT) =>
    +        Tensor v1 srcT -- ^ __x__
    +        -> Tensor Value dstT -- ^ __y__
    +cast x | eqLengthGuard [] =
    +    buildOp (opDef "Cast"
    +             & opAttr "DstT" .~ tensorType (undefined :: dstT)
    +             & opAttr "SrcT" .~ tensorType (undefined :: srcT))
    +        x
    +{-
    +attr { name: "SrcT" type: "type" }
    +attr { name: "DstT" type: "type" }
    +input_arg { name: "x" type_attr: "SrcT" }
    +output_arg { name: "y" type_attr: "DstT" }
    +-}
    +
    +-- | Increments 'ref' until it reaches 'limit'.
    +--
    +-- This operation outputs "ref" after the update is done.  This makes it
    +-- easier to chain operations that need to use the updated value.
    +countUpTo :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int32,
    +                                                  Data.Int.Int64] t) =>
    +             Data.Int.Int64 -- ^ __limit__: If incrementing ref would bring it above limit, instead generates an
    +                            -- 'OutOfRange' error.
    +             -> Tensor v1 t -- ^ __ref__: Should be from a scalar `Variable` node.
    +             -> Tensor Value t -- ^ __output__: A copy of the input before increment. If nothing else modifies the
    +             -- input, the values produced will all be distinct.
    +countUpTo limit ref | eqLengthGuard [] =
    +    buildOp (opDef "CountUpTo"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "limit" .~ limit)
    +        ref
    +{-
    +attr {
    +  description: "If incrementing ref would bring it above limit, instead generates an\n\'OutOfRange\' error."
    +  name: "limit"
    +  type: "int"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Should be from a scalar `Variable` node."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the absolute value of a tensor.
    +--
    +-- Given a tensor `x`, this operation returns a tensor containing the absolute
    +-- value of each element in `x`. For example, if x is an input element and y is
    +-- an output element, this operation computes \\(y = |x|\\).
    +abs :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64,
    +                                            Data.Word.Word16, Double,
    +                                            Float] t) => Tensor v1 t -- ^ __x__
    +       -> Tensor Value t -- ^ __y__
    +abs x | eqLengthGuard [] =
    +    buildOp (opDef "Abs"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Computes numerical negative value element-wise.
    +--
    +-- I.e., \\(y = -x\\).
    +neg :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                            (Data.Complex.Complex Float),
    +                                            Data.Int.Int32, Data.Int.Int64,
    +                                            Data.Word.Word16, Double,
    +                                            Float] t) => Tensor v1 t -- ^ __x__
    +       -> Tensor Value t -- ^ __y__
    +neg x | eqLengthGuard [] =
    +    buildOp (opDef "Neg"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Returns the element-wise max of two SparseTensors.
    +--
    +-- Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
    +sparseSparseMaximum :: forall v1 v2 v3 v4 v5 v6 t . (TensorType t,
    +                                                     OneOf '[Data.Int.Int16,
    +                                                             Data.Int.Int32,
    +                                                             Data.Int.Int64,
    +                                                             Data.Int.Int8,
    +                                                             Data.Word.Word16,
    +                                                             Data.Word.Word8,
    +                                                             Double,
    +                                                             Float] t) =>
    +                       Tensor v1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
    +                                                -- SparseTensor, in the canonical lexicographic ordering.
    +                       -> Tensor v2 t -- ^ __a_values__: 1-D.  `N` non-empty values corresponding to `a_indices`.
    +                       -> Tensor v3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  Shape of the input SparseTensor.
    +                       -> Tensor v4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand.
    +                       -> Tensor v5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype.
    +                       -> Tensor v6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal.
    +                       -> (Tensor Value Data.Int.Int64, Tensor Value t)
    +                       -- ^ (__output_indices__, __output_values__)
    +                       --
    +                       -- * __output_indices__: 2-D.  The indices of the output SparseTensor.
    +                       --
    +                       -- * __output_values__: 1-D.  The values of the output SparseTensor.
    +sparseSparseMaximum a_indices a_values a_shape b_indices b_values
    +                    b_shape | eqLengthGuard [] =
    +    buildOp (opDef "SparseSparseMaximum"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        a_indices a_values a_shape b_indices b_values b_shape
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering."
    +  name: "a_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `N` non-empty values corresponding to `a_indices`."
    +  name: "a_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the input SparseTensor."
    +  name: "a_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "counterpart to `a_indices` for the other operand."
    +  name: "b_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "counterpart to `a_values` for the other operand; must be of the same dtype."
    +  name: "b_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "counterpart to `a_shape` for the other operand; the two shapes must be equal."
    +  name: "b_shape"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "2-D.  The indices of the output SparseTensor."
    +  name: "output_indices"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "1-D.  The values of the output SparseTensor."
    +  name: "output_values"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the gradient for the inverse of `x` wrt its input.
    +--
    +-- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
    +-- is the corresponding input gradient.
    +invGrad :: forall v1 v2 t . (TensorType t,
    +                             OneOf '[(Data.Complex.Complex Double),
    +                                     (Data.Complex.Complex Float),
    +                                     Data.Word.Word16, Double, Float] t) =>
    +           Tensor v1 t -- ^ __x__
    +           -> Tensor v2 t -- ^ __y__
    +           -> Tensor Value t -- ^ __z__
    +invGrad x y | eqLengthGuard [] =
    +    buildOp (opDef "InvGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Computes square root of x element-wise.
    +--
    +-- I.e., \\(y = \sqrt{x} = x^{1/2}\\).
    +sqrt :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                             (Data.Complex.Complex Float),
    +                                             Data.Word.Word16, Double,
    +                                             Float] t) => Tensor v1 t -- ^ __x__
    +        -> Tensor Value t -- ^ __y__
    +sqrt x | eqLengthGuard [] =
    +    buildOp (opDef "Sqrt"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Computes the inverse of one or more square invertible matrices or their
    +-- adjoints (conjugate transposes).
    +-- 
    +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
    +-- form square matrices. The output is a tensor of the same shape as the input
    +-- containing the inverse for all input submatrices `[..., :, :]`.
    +-- 
    +-- The op uses LU decomposition with partial pivoting to compute the inverses.
    +-- 
    +-- If a matrix is not invertible there is no guarantee what the op does. It
    +-- may detect the condition and raise an exception or it may simply return a
    +-- garbage result.
    +matrixInverse :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                 Tensor v1 t -- ^ __input__: Shape is `[..., M, M]`.
    +                 -> Tensor Value t -- ^ __output__: Shape is `[..., M, M]`.
    +matrixInverse input | eqLengthGuard [] =
    +    buildOp (opDef "MatrixInverse"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { default_value { b: false } name: "adjoint" type: "bool" }
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
    +}
    +output_arg {
    +  description: "Shape is `[..., M, M]`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the gradient for the sqrt of `x` wrt its input.
    +--
    +-- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
    +-- is the corresponding input gradient.
    +sqrtGrad :: forall v1 v2 t . (TensorType t,
    +                              OneOf '[(Data.Complex.Complex Double),
    +                                      (Data.Complex.Complex Float),
    +                                      Data.Word.Word16, Double, Float] t) =>
    +            Tensor v1 t -- ^ __x__
    +            -> Tensor v2 t -- ^ __y__
    +            -> Tensor Value t -- ^ __z__
    +sqrtGrad x y | eqLengthGuard [] =
    +    buildOp (opDef "SqrtGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Inserts a dimension of 1 into a tensor's shape.
    +--
    +-- Given a tensor `input`, this operation inserts a dimension of 1 at the
    +-- dimension index `dim` of `input`'s shape. The dimension index `dim` starts at
    +-- zero; if you specify a negative number for `dim` it is counted backward from
    +-- the end.
    +-- 
    +-- This operation is useful if you want to add a batch dimension to a single
    +-- element. For example, if you have a single image of shape `[height, width,
    +-- channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
    +-- which will make the shape `[1, height, width, channels]`.
    +-- 
    +-- Other examples:
    +-- 
    +-- ```prettyprint
    +-- # 't' is a tensor of shape [2]
    +-- shape(expand_dims(t, 0)) ==> [1, 2]
    +-- shape(expand_dims(t, 1)) ==> [2, 1]
    +-- shape(expand_dims(t, -1)) ==> [2, 1]
    +-- 
    +-- # 't2' is a tensor of shape [2, 3, 5]
    +-- shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
    +-- shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
    +-- shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
    +-- ```
    +-- 
    +-- This operation requires that:
    +-- 
    +-- `-1-input.dims() <= dim <= input.dims()`
    +-- 
    +-- This operation is related to `squeeze()`, which removes dimensions of
    +-- size 1.
    +expandDims :: forall v1 v2 t tdim . (TensorType t, TensorType tdim,
    +                                     OneOf '[Data.Int.Int32,
    +                                             Data.Int.Int64] tdim) =>
    +              Tensor v1 t -- ^ __input__
    +              -> Tensor v2 tdim -- ^ __dim__: 0-D (scalar). Specifies the dimension index at which to
    +                                -- expand the shape of `input`.
    +              -> Tensor Value t -- ^ __output__: Contains the same data as `input`, but its shape has an additional
    +              -- dimension of size 1 added.
    +expandDims input dim | eqLengthGuard [] =
    +    buildOp (opDef "ExpandDims"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tdim" .~ tensorType (undefined :: tdim))
    +        input dim
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tdim"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +input_arg {
    +  description: "0-D (scalar). Specifies the dimension index at which to\nexpand the shape of `input`."
    +  name: "dim"
    +  type_attr: "Tdim"
    +}
    +output_arg {
    +  description: "Contains the same data as `input`, but its shape has an additional\ndimension of size 1 added."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the "logical and" of elements across dimensions of a tensor.
    +--
    +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
    +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
    +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
    +-- retained with length 1.
    +all :: forall v1 v2 tidx . (TensorType tidx, OneOf '[Data.Int.Int32,
    +                                                     Data.Int.Int64] tidx) =>
    +       Tensor v1 Bool -- ^ __input__: The tensor to reduce.
    +       -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
    +       -> Tensor Value Bool -- ^ __output__: The reduced tensor.
    +all input reduction_indices | eqLengthGuard [] =
    +    buildOp (opDef "All"
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        input reduction_indices
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, retain reduced dimensions with length 1."
    +  name: "keep_dims"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The tensor to reduce." name: "input" type: DT_BOOL
    +}
    +input_arg {
    +  description: "The dimensions to reduce."
    +  name: "reduction_indices"
    +  type_attr: "Tidx"
    +}
    +output_arg {
    +  description: "The reduced tensor." name: "output" type: DT_BOOL
    +}
    +-}
    +
    +-- | Performs beam search decoding on the logits given in input.
    +--
    +-- A note about the attribute merge_repeated: For the beam search decoder,
    +-- this means that if consecutive entries in a beam are the same, only
    +-- the first of these is emitted.  That is, when the top path is "A B B B B",
    +-- "A B" is returned if merge_repeated = True but "A B B B B" is
    +-- returned if merge_repeated = False.
    +cTCBeamSearchDecoder :: Data.Int.Int64 -- ^ __beam_width__: A scalar >= 0 (beam search beam width).
    +                        -> Data.Int.Int64 -- ^ __top_paths__: A scalar >= 0, <= beam_width (controls output size).
    +                        -> Tensor v1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    +                        -> Tensor v2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch)`.
    +                        -> ([Tensor Value Data.Int.Int64],
    +                            [Tensor Value Data.Int.Int64],
    +                            [Tensor Value Data.Int.Int64], Tensor Value Float)
    +                        -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__)
    +                        --
    +                        -- * __decoded_indices__: A list (length: top_paths) of indices matrices.  Matrix j,
    +                        -- size `(total_decoded_outputs[j] x 2)`, has indices of a
    +                        -- `SparseTensor<int64, 2>`.  The rows store: [batch, time].
    +                        --
    +                        -- * __decoded_values__: A list (length: top_paths) of values vectors.  Vector j,
    +                        -- size `(length total_decoded_outputs[j])`, has the values of a
    +                        -- `SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j.
    +                        --
    +                        -- * __decoded_shape__: A list (length: top_paths) of shape vector.  Vector j,
    +                        -- size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
    +                        -- Its values are: `[batch_size, max_decoded_length[j]]`.
    +                        --
    +                        -- * __log_probability__: A matrix, shaped: `(batch_size x top_paths)`.  The
    +                        -- sequence log-probabilities.
    +cTCBeamSearchDecoder beam_width top_paths inputs
    +                     sequence_length | eqLengthGuard [] =
    +    buildListOp [top_paths, top_paths, top_paths] (opDef "CTCBeamSearchDecoder"
    +                                                   & opAttr "beam_width" .~ beam_width
    +                                                   & opAttr "top_paths" .~ top_paths)
    +        inputs sequence_length
    +{-
    +attr {
    +  description: "A scalar >= 0 (beam search beam width)."
    +  has_minimum: true
    +  minimum: 1
    +  name: "beam_width"
    +  type: "int"
    +}
    +attr {
    +  description: "A scalar >= 0, <= beam_width (controls output size)."
    +  has_minimum: true
    +  minimum: 1
    +  name: "top_paths"
    +  type: "int"
    +}
    +attr {
    +  default_value { b: true }
    +  description: "If true, merge repeated classes in output."
    +  name: "merge_repeated"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
    +  name: "inputs"
    +  type: DT_FLOAT
    +}
    +input_arg {
    +  description: "A vector containing sequence lengths, size `(batch)`."
    +  name: "sequence_length"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "A list (length: top_paths) of indices matrices.  Matrix j,\nsize `(total_decoded_outputs[j] x 2)`, has indices of a\n`SparseTensor<int64, 2>`.  The rows store: [batch, time]."
    +  name: "decoded_indices"
    +  number_attr: "top_paths"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A list (length: top_paths) of values vectors.  Vector j,\nsize `(length total_decoded_outputs[j])`, has the values of a\n`SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j."
    +  name: "decoded_values"
    +  number_attr: "top_paths"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A list (length: top_paths) of shape vector.  Vector j,\nsize `(2)`, stores the shape of the decoded `SparseTensor[j]`.\nIts values are: `[batch_size, max_decoded_length[j]]`."
    +  name: "decoded_shape"
    +  number_attr: "top_paths"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A matrix, shaped: `(batch_size x top_paths)`.  The\nsequence log-probabilities."
    +  name: "log_probability"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Computes reciprocal of square root of x element-wise.
    +--
    +-- I.e., \\(y = 1 / \sqrt{x}\\).
    +rsqrt :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                              (Data.Complex.Complex Float),
    +                                              Data.Word.Word16, Double,
    +                                              Float] t) =>
    +         Tensor v1 t -- ^ __x__
    +         -> Tensor Value t -- ^ __y__
    +rsqrt x | eqLengthGuard [] =
    +    buildOp (opDef "Rsqrt"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Computes the gradient for the tanh of `x` wrt its input.
    +--
    +-- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
    +-- is the corresponding input gradient.
    +tanhGrad :: forall v1 v2 t . (TensorType t,
    +                              OneOf '[(Data.Complex.Complex Double),
    +                                      (Data.Complex.Complex Float),
    +                                      Data.Word.Word16, Double, Float] t) =>
    +            Tensor v1 t -- ^ __x__
    +            -> Tensor v2 t -- ^ __y__
    +            -> Tensor Value t -- ^ __z__
    +tanhGrad x y | eqLengthGuard [] =
    +    buildOp (opDef "TanhGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Computes sin of x element-wise.
    +
    +sin :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                            (Data.Complex.Complex Float),
    +                                            Data.Word.Word16, Double,
    +                                            Float] t) => Tensor v1 t -- ^ __x__
    +       -> Tensor Value t -- ^ __y__
    +sin x | eqLengthGuard [] =
    +    buildOp (opDef "Sin"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Computes the determinant of one or more square matrices.
    +--
    +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
    +-- form square matrices. The output is a tensor containing the determinants
    +-- for all input submatrices `[..., :, :]`.
    +matrixDeterminant :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                     Tensor v1 t -- ^ __input__: Shape is `[..., M, M]`.
    +                     -> Tensor Value t -- ^ __output__: Shape is `[...]`.
    +matrixDeterminant input | eqLengthGuard [] =
    +    buildOp (opDef "MatrixDeterminant"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
    +}
    +output_arg {
    +  description: "Shape is `[...]`." name: "output" type_attr: "T"
    +}
    +-}
    +
    +-- | Computes cos of x element-wise.
    +
    +cos :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                            (Data.Complex.Complex Float),
    +                                            Data.Word.Word16, Double,
    +                                            Float] t) => Tensor v1 t -- ^ __x__
    +       -> Tensor Value t -- ^ __y__
    +cos x | eqLengthGuard [] =
    +    buildOp (opDef "Cos"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | BatchToSpace for 4-D tensors of type T.
    +--
    +-- This is a legacy version of the more general BatchToSpaceND.
    +-- 
    +-- Rearranges (permutes) data from batch into blocks of spatial data, followed by
    +-- cropping. This is the reverse transformation of SpaceToBatch. More specifically,
    +-- this op outputs a copy of the input tensor where values from the `batch`
    +-- dimension are moved in spatial blocks to the `height` and `width` dimensions,
    +-- followed by cropping along the `height` and `width` dimensions.
    +batchToSpace :: forall v1 v2 t tidx . (TensorType t, TensorType tidx,
    +                                       OneOf '[Data.Int.Int32,
    +                                               Data.Int.Int64] tidx) =>
    +                Data.Int.Int64 -- ^ __block_size__
    +                -> Tensor v1 t -- ^ __input__: 4-D tensor with shape
    +                               -- `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
    +                               --   depth]`. Note that the batch size of the input tensor must be divisible by
    +                               -- `block_size * block_size`.
    +                -> Tensor v2 tidx -- ^ __crops__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
    +                                  -- how many elements to crop from the intermediate result across the spatial
    +                                  -- dimensions as follows:
    +                                  -- 
    +                                  --     crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
    +                -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, height, width, depth]`, where:
    +                -- 
    +                --       height = height_pad - crop_top - crop_bottom
    +                --       width = width_pad - crop_left - crop_right
    +                -- 
    +                -- The attr `block_size` must be greater than one. It indicates the block size.
    +                -- 
    +                -- Some examples:
    +                -- 
    +                -- (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
    +                -- 
    +                -- ```prettyprint
    +                -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
    +                -- ```
    +                -- 
    +                -- The output tensor has shape `[1, 2, 2, 1]` and value:
    +                -- 
    +                -- ```prettyprint
    +                -- x = [[[[1], [2]], [[3], [4]]]]
    +                -- ```
    +                -- 
    +                -- (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
    +                -- 
    +                -- ```prettyprint
    +                -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
    +                -- ```
    +                -- 
    +                -- The output tensor has shape `[1, 2, 2, 3]` and value:
    +                -- 
    +                -- ```prettyprint
    +                -- x = [[[[1, 2, 3], [4, 5, 6]],
    +                --       [[7, 8, 9], [10, 11, 12]]]]
    +                -- ```
    +                -- 
    +                -- (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
    +                -- 
    +                -- ```prettyprint
    +                -- x = [[[[1], [3]], [[9], [11]]],
    +                --      [[[2], [4]], [[10], [12]]],
    +                --      [[[5], [7]], [[13], [15]]],
    +                --      [[[6], [8]], [[14], [16]]]]
    +                -- ```
    +                -- 
    +                -- The output tensor has shape `[1, 4, 4, 1]` and value:
    +                -- 
    +                -- ```prettyprint
    +                -- x = [[[1],   [2],  [3],  [4]],
    +                --      [[5],   [6],  [7],  [8]],
    +                --      [[9],  [10], [11],  [12]],
    +                --      [[13], [14], [15],  [16]]]
    +                -- ```
    +                -- 
    +                -- (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
    +                -- 
    +                -- ```prettyprint
    +                -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
    +                --      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
    +                -- ```
    +                -- 
    +                -- The output tensor has shape `[2, 2, 4, 1]` and value:
    +                -- 
    +                -- ```prettyprint
    +                -- x = [[[[1],  [2],  [3],  [4]],
    +                --       [[5],  [6],  [7],  [8]]],
    +                --      [[[9],  [10], [11], [12]],
    +                --       [[13], [14], [15], [16]]]]
    +                -- ```
    +batchToSpace block_size input crops | eqLengthGuard [] =
    +    buildOp (opDef "BatchToSpace"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx)
    +             & opAttr "block_size" .~ block_size)
    +        input crops
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  has_minimum: true minimum: 2 name: "block_size" type: "int"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "4-D tensor with shape\n`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n  depth]`. Note that the batch size of the input tensor must be divisible by\n`block_size * block_size`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\nhow many elements to crop from the intermediate result across the spatial\ndimensions as follows:\n\n    crops = [[crop_top, crop_bottom], [crop_left, crop_right]]"
    +  name: "crops"
    +  type_attr: "Tidx"
    +}
    +output_arg {
    +  description: "4-D with shape `[batch, height, width, depth]`, where:\n\n      height = height_pad - crop_top - crop_bottom\n      width = width_pad - crop_left - crop_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```prettyprint\nx = [[[1],   [2],  [3],  [4]],\n     [[5],   [6],  [7],  [8]],\n     [[9],  [10], [11],  [12]],\n     [[13], [14], [15],  [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```"
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Converts a sparse representation into a dense tensor.
    +--
    +-- Builds an array `dense` with shape `output_shape` such that
    +-- 
    +-- ```prettyprint
    +-- # If sparse_indices is scalar
    +-- dense[i] = (i == sparse_indices ? sparse_values : default_value)
    +-- 
    +-- # If sparse_indices is a vector, then for each i
    +-- dense[sparse_indices[i]] = sparse_values[i]
    +-- 
    +-- # If sparse_indices is an n by d matrix, then for each i in [0, n)
    +-- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
    +-- ```
    +-- 
    +-- All other values in `dense` are set to `default_value`.  If `sparse_values` is a
    +-- scalar, all sparse indices are set to this single value.
    +-- 
    +-- Indices should be sorted in lexicographic order, and indices must not
    +-- contain any repeats. If `validate_indices` is true, these properties
    +-- are checked during execution.
    +sparseToDense :: forall v1 v2 v3 v4 t tindices . (TensorType t,
    +                                                  TensorType tindices,
    +                                                  OneOf '[Data.Int.Int32,
    +                                                          Data.Int.Int64] tindices) =>
    +                 Tensor v1 tindices -- ^ __sparse_indices__: 0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
    +                                    -- index where `sparse_values[i]` will be placed.
    +                 -> Tensor v2 tindices -- ^ __output_shape__: 1-D.  Shape of the dense output tensor.
    +                 -> Tensor v3 t -- ^ __sparse_values__: 1-D.  Values corresponding to each row of `sparse_indices`,
    +                                -- or a scalar value to be used for all sparse indices.
    +                 -> Tensor v4 t -- ^ __default_value__: Scalar value to set for indices not specified in
    +                                -- `sparse_indices`.
    +                 -> Tensor Value t -- ^ __dense__: Dense output tensor of shape `output_shape`.
    +sparseToDense sparse_indices output_shape sparse_values
    +              default_value | eqLengthGuard [] =
    +    buildOp (opDef "SparseToDense"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        sparse_indices output_shape sparse_values default_value
    +{-
    +attr {
    +  default_value { b: true }
    +  description: "If true, indices are checked to make sure they are sorted in\nlexicographic order and that there are no repeats."
    +  name: "validate_indices"
    +  type: "bool"
    +}
    +attr { name: "T" type: "type" }
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +input_arg {
    +  description: "0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete\nindex where `sparse_values[i]` will be placed."
    +  name: "sparse_indices"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the dense output tensor."
    +  name: "output_shape"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "1-D.  Values corresponding to each row of `sparse_indices`,\nor a scalar value to be used for all sparse indices."
    +  name: "sparse_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scalar value to set for indices not specified in\n`sparse_indices`."
    +  name: "default_value"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Dense output tensor of shape `output_shape`."
    +  name: "dense"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes asin of x element-wise.
    +
    +asin :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                             (Data.Complex.Complex Float),
    +                                             Data.Int.Int32, Data.Int.Int64,
    +                                             Data.Word.Word16, Double,
    +                                             Float] t) => Tensor v1 t -- ^ __x__
    +        -> Tensor Value t -- ^ __y__
    +asin x | eqLengthGuard [] =
    +    buildOp (opDef "Asin"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Returns the index with the smallest value across dimensions of a tensor.
    +
    +argMin :: forall v1 v2 t tidx . (TensorType t,
    +                                 OneOf '[(Data.Complex.Complex Double),
    +                                         (Data.Complex.Complex Float),
    +                                         Data.Int.Int16, Data.Int.Int32,
    +                                         Data.Int.Int64, Data.Int.Int8,
    +                                         Data.Word.Word16, Data.Word.Word8,
    +                                         Double, Float] t, TensorType tidx,
    +                                 OneOf '[Data.Int.Int32,
    +                                         Data.Int.Int64] tidx) =>
    +          Tensor v1 t -- ^ __input__
    +          -> Tensor v2 tidx -- ^ __dimension__: int32, 0 <= dimension < rank(input).  Describes which dimension
    +                            -- of the input Tensor to reduce across. For vectors, use dimension = 0.
    +          -> Tensor Value Data.Int.Int64 -- ^ __output__
    +argMin input dimension | eqLengthGuard [] =
    +    buildOp (opDef "ArgMin"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        input dimension
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +input_arg {
    +  description: "int32, 0 <= dimension < rank(input).  Describes which dimension\nof the input Tensor to reduce across. For vectors, use dimension = 0."
    +  name: "dimension"
    +  type_attr: "Tidx"
    +}
    +output_arg { name: "output" type: DT_INT64 }
    +-}
    +
    +-- | Returns which elements of x are Inf.
    +
    +isInf :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                              Float] t) =>
    +         Tensor v1 t -- ^ __x__
    +         -> Tensor Value Bool -- ^ __y__
    +isInf x | eqLengthGuard [] =
    +    buildOp (opDef "IsInf"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type: DT_BOOL }
    +-}
    +
    +-- | Returns an element-wise indication of the sign of a number.
    +--
    +-- `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
    +-- 
    +-- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
    +sign :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                             (Data.Complex.Complex Float),
    +                                             Data.Int.Int32, Data.Int.Int64,
    +                                             Data.Word.Word16, Double,
    +                                             Float] t) => Tensor v1 t -- ^ __x__
    +        -> Tensor Value t -- ^ __y__
    +sign x | eqLengthGuard [] =
    +    buildOp (opDef "Sign"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Returns x + y element-wise.
    +--
    +-- *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +add :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                               (Data.Complex.Complex Float),
    +                                               Data.ByteString.ByteString,
    +                                               Data.Int.Int16, Data.Int.Int32,
    +                                               Data.Int.Int64, Data.Int.Int8,
    +                                               Data.Word.Word16,
    +                                               Data.Word.Word8, Double,
    +                                               Float] t) =>
    +       Tensor v1 t -- ^ __x__
    +       -> Tensor v2 t -- ^ __y__
    +       -> Tensor Value t -- ^ __z__
    +add x y | eqLengthGuard [] =
    +    buildOp (opDef "Add"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_STRING
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Update relevant entries in '*var' according to the Ftrl-proximal scheme.
    +--
    +-- That is, for rows we have grad for, we update var, accum and linear as follows:
    +-- accum_new = accum + grad * grad
    +-- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
    +-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
    +-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
    +-- accum = accum_new
    +sparseApplyFtrl :: forall v1 v2 v3 v4 v5 v6 v7 v8 v9 t tindices . (TensorType t,
    +                                                                   OneOf '[(Data.Complex.Complex Double),
    +                                                                           (Data.Complex.Complex Float),
    +                                                                           Data.Int.Int16,
    +                                                                           Data.Int.Int32,
    +                                                                           Data.Int.Int64,
    +                                                                           Data.Int.Int8,
    +                                                                           Data.Word.Word16,
    +                                                                           Data.Word.Word8,
    +                                                                           Double,
    +                                                                           Float] t,
    +                                                                   TensorType tindices,
    +                                                                   OneOf '[Data.Int.Int32,
    +                                                                           Data.Int.Int64] tindices) =>
    +                   Tensor v1 t -- ^ __var__: Should be from a Variable().
    +                   -> Tensor v2 t -- ^ __accum__: Should be from a Variable().
    +                   -> Tensor v3 t -- ^ __linear__: Should be from a Variable().
    +                   -> Tensor v4 t -- ^ __grad__: The gradient.
    +                   -> Tensor v5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
    +                   -> Tensor v6 t -- ^ __lr__: Scaling factor. Must be a scalar.
    +                   -> Tensor v7 t -- ^ __l1__: L1 regularization. Must be a scalar.
    +                   -> Tensor v8 t -- ^ __l2__: L2 regularization. Must be a scalar.
    +                   -> Tensor v9 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
    +                   -> Tensor Value t -- ^ __out__: Same as "var".
    +sparseApplyFtrl var accum linear grad indices lr l1 l2
    +                lr_power | eqLengthGuard [] =
    +    buildOp (opDef "SparseApplyFtrl"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        var accum linear grad indices lr l1 l2 lr_power
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
    +  name: "use_locking"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "var"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "accum"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Should be from a Variable()."
    +  is_ref: true
    +  name: "linear"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "The gradient." name: "grad" type_attr: "T"
    +}
    +input_arg {
    +  description: "A vector of indices into the first dimension of var and accum."
    +  name: "indices"
    +  type_attr: "Tindices"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L1 regularization. Must be a scalar."
    +  name: "l1"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "L2 regularization. Must be a scalar."
    +  name: "l2"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Scaling factor. Must be a scalar."
    +  name: "lr_power"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Same as \"var\"."
    +  is_ref: true
    +  name: "out"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Returns x - y element-wise.
    +--
    +-- *NOTE*: `Sub` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +sub :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                               (Data.Complex.Complex Float),
    +                                               Data.Int.Int32, Data.Int.Int64,
    +                                               Data.Word.Word16, Double,
    +                                               Float] t) =>
    +       Tensor v1 t -- ^ __x__
    +       -> Tensor v2 t -- ^ __y__
    +       -> Tensor Value t -- ^ __z__
    +sub x y | eqLengthGuard [] =
    +    buildOp (opDef "Sub"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | 
    +
    +batchFFT3D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
    +              -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
    +batchFFT3D input | eqLengthGuard [] =
    +    buildOp (opDef "BatchFFT3D")
    +        input
    +{-
    +input_arg { name: "input" type: DT_COMPLEX64 }
    +output_arg { name: "output" type: DT_COMPLEX64 }
    +-}
    +
    +-- | Computes the sum of elements across dimensions of a SparseTensor.
    +--
    +-- This Op takes a SparseTensor and is the sparse counterpart to
    +-- `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a
    +-- SparseTensor.
    +-- 
    +-- Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
    +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
    +-- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
    +-- with length 1.
    +-- 
    +-- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
    +-- with a single element is returned.  Additionally, the axes can be negative,
    +-- which are interpreted according to the indexing rules in Python.
    +sparseReduceSumSparse :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                                 OneOf '[(Data.Complex.Complex Double),
    +                                                         (Data.Complex.Complex Float),
    +                                                         Data.Int.Int16,
    +                                                         Data.Int.Int32,
    +                                                         Data.Int.Int64,
    +                                                         Data.Int.Int8,
    +                                                         Data.Word.Word16,
    +                                                         Data.Word.Word8,
    +                                                         Double, Float] t) =>
    +                         Tensor v1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
    +                                                  -- SparseTensor, possibly not in canonical ordering.
    +                         -> Tensor v2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
    +                         -> Tensor v3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
    +                         -> Tensor v4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
    +                         -> (Tensor Value Data.Int.Int64, Tensor Value t,
    +                             Tensor Value Data.Int.Int64)
    +                         -- ^ (__output_indices__, __output_values__, __output_shape__)
    +                         --
    +                         -- * __output_indices__
    +                         --
    +                         -- * __output_values__
    +                         --
    +                         -- * __output_shape__
    +sparseReduceSumSparse input_indices input_values input_shape
    +                      reduction_axes | eqLengthGuard [] =
    +    buildOp (opDef "SparseReduceSumSparse"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input_indices input_values input_shape reduction_axes
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, retain reduced dimensions with length 1."
    +  name: "keep_dims"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
    +  name: "input_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
    +  name: "input_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the input SparseTensor."
    +  name: "input_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  Length-`K` vector containing the reduction axes."
    +  name: "reduction_axes"
    +  type: DT_INT32
    +}
    +output_arg { name: "output_indices" type: DT_INT64 }
    +output_arg { name: "output_values" type_attr: "T" }
    +output_arg { name: "output_shape" type: DT_INT64 }
    +-}
    +
    +-- | Adds `bias` to `value`.
    +--
    +-- This is a special case of `tf.add` where `bias` is restricted to be 1-D.
    +-- Broadcasting is supported, so `value` may have any number of dimensions.
    +biasAdd :: forall v1 v2 t . (TensorType t,
    +                             OneOf '[(Data.Complex.Complex Double),
    +                                     (Data.Complex.Complex Float),
    +                                     Data.Int.Int16, Data.Int.Int32,
    +                                     Data.Int.Int64, Data.Int.Int8,
    +                                     Data.Word.Word16, Data.Word.Word8, Double,
    +                                     Float] t) =>
    +           Tensor v1 t -- ^ __value__: Any number of dimensions.
    +           -> Tensor v2 t -- ^ __bias__: 1-D with size the last dimension of `value`.
    +           -> Tensor Value t -- ^ __output__: Broadcasted sum of `value` and `bias`.
    +biasAdd value bias | eqLengthGuard [] =
    +    buildOp (opDef "BiasAdd"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        value bias
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { s: "NHWC" s: "NCHW" } }
    +  default_value { s: "NHWC" }
    +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n    dimension."
    +  name: "data_format"
    +  type: "string"
    +}
    +input_arg {
    +  description: "Any number of dimensions."
    +  name: "value"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D with size the last dimension of `value`."
    +  name: "bias"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Broadcasted sum of `value` and `bias`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Returns x * y element-wise.
    +--
    +-- *NOTE*: `Mul` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +mul :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                               (Data.Complex.Complex Float),
    +                                               Data.Int.Int16, Data.Int.Int32,
    +                                               Data.Int.Int64, Data.Int.Int8,
    +                                               Data.Word.Word16,
    +                                               Data.Word.Word8, Double,
    +                                               Float] t) =>
    +       Tensor v1 t -- ^ __x__
    +       -> Tensor v2 t -- ^ __y__
    +       -> Tensor Value t -- ^ __z__
    +mul x y | eqLengthGuard [] =
    +    buildOp (opDef "Mul"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Returns x / y element-wise.
    +--
    +-- *NOTE*: `Div` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +div :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                               (Data.Complex.Complex Float),
    +                                               Data.Int.Int16, Data.Int.Int32,
    +                                               Data.Int.Int64, Data.Int.Int8,
    +                                               Data.Word.Word16,
    +                                               Data.Word.Word8, Double,
    +                                               Float] t) =>
    +       Tensor v1 t -- ^ __x__
    +       -> Tensor v2 t -- ^ __y__
    +       -> Tensor Value t -- ^ __z__
    +div x y | eqLengthGuard [] =
    +    buildOp (opDef "Div"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Forwards the input to the output.
    +--
    +-- This operator represents the loop termination condition used by the
    +-- "pivot" switches of a loop.
    +loopCond :: Tensor v1 Bool -- ^ __input__: A boolean scalar, representing the branch predicate of the Switch op.
    +            -> Tensor Value Bool -- ^ __output__: The same tensor as `input`.
    +loopCond input | eqLengthGuard [] =
    +    buildOp (opDef "LoopCond")
    +        input
    +{-
    +input_arg {
    +  description: "A boolean scalar, representing the branch predicate of the Switch op."
    +  name: "input"
    +  type: DT_BOOL
    +}
    +output_arg {
    +  description: "The same tensor as `input`."
    +  name: "output"
    +  type: DT_BOOL
    +}
    +-}
    +
    +-- | Returns (x - y)(x - y) element-wise.
    +--
    +-- *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +squaredDifference :: forall v1 v2 t . (TensorType t,
    +                                       OneOf '[(Data.Complex.Complex Double),
    +                                               (Data.Complex.Complex Float),
    +                                               Data.Int.Int32, Data.Int.Int64,
    +                                               Data.Word.Word16, Double,
    +                                               Float] t) =>
    +                     Tensor v1 t -- ^ __x__
    +                     -> Tensor v2 t -- ^ __y__
    +                     -> Tensor Value t -- ^ __z__
    +squaredDifference x y | eqLengthGuard [] =
    +    buildOp (opDef "SquaredDifference"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Returns the max of x and y (i.e. x > y ? x : y) element-wise.
    +--
    +-- *NOTE*: `Maximum` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +maximum :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32,
    +                                                   Data.Int.Int64,
    +                                                   Data.Word.Word16, Double,
    +                                                   Float] t) =>
    +           Tensor v1 t -- ^ __x__
    +           -> Tensor v2 t -- ^ __y__
    +           -> Tensor Value t -- ^ __z__
    +maximum x y | eqLengthGuard [] =
    +    buildOp (opDef "Maximum"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Generates labels for candidate sampling with a log-uniform distribution.
    +--
    +-- See explanations of candidate sampling and the data formats at
    +-- go/candidate-sampling.
    +-- 
    +-- For each batch, this op picks a single set of sampled candidate labels.
    +-- 
    +-- The advantages of sampling candidates per-batch are simplicity and the
    +-- possibility of efficient dense matrix multiplication. The disadvantage is that
    +-- the sampled candidates must be chosen independently of the context and of the
    +-- true labels.
    +logUniformCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch.
    +                              -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
    +                              -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
    +                              -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
    +                                      -- candidates in a batch are unique. This requires some approximation to
    +                                      -- estimate the post-rejection sampling probabilities.
    +                              -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
    +                                                          -- IDs of the num_true target_classes in the corresponding original label.
    +                              -> (Tensor Value Data.Int.Int64,
    +                                  Tensor Value Float, Tensor Value Float)
    +                              -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
    +                              --
    +                              -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
    +                              -- the ID of a sampled candidate.
    +                              --
    +                              -- * __true_expected_count__: A batch_size * num_true matrix, representing
    +                              -- the number of times each candidate is expected to occur in a batch
    +                              -- of sampled candidates. If unique=true, then this is a probability.
    +                              --
    +                              -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
    +                              -- candidate representing the number of times the candidate is expected
    +                              -- to occur in a batch of sampled candidates.  If unique=true, then this is a
    +                              -- probability.
    +logUniformCandidateSampler num_sampled num_true range_max unique
    +                           true_classes | eqLengthGuard [] =
    +    buildOp (opDef "LogUniformCandidateSampler"
    +             & opAttr "num_sampled" .~ num_sampled
    +             & opAttr "num_true" .~ num_true
    +             & opAttr "range_max" .~ range_max
    +             & opAttr "unique" .~ unique)
    +        true_classes
    +{-
    +attr {
    +  description: "Number of true labels per context."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_true"
    +  type: "int"
    +}
    +attr {
    +  description: "Number of candidates to randomly sample per batch."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_sampled"
    +  type: "int"
    +}
    +attr {
    +  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
    +  name: "unique"
    +  type: "bool"
    +}
    +attr {
    +  description: "The sampler will sample integers from the interval [0, range_max)."
    +  has_minimum: true
    +  minimum: 1
    +  name: "range_max"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
    +  name: "seed"
    +  type: "int"
    +}
    +attr {
    +  default_value { i: 0 }
    +  description: "An second seed to avoid seed collision."
    +  name: "seed2"
    +  type: "int"
    +}
    +input_arg {
    +  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
    +  name: "true_classes"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
    +  name: "sampled_candidates"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
    +  name: "true_expected_count"
    +  type: DT_FLOAT
    +}
    +output_arg {
    +  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
    +  name: "sampled_expected_count"
    +  type: DT_FLOAT
    +}
    +-}
    +
    +-- | Returns the truth value of (x < y) element-wise.
    +--
    +-- *NOTE*: `Less` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +less :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
    +                                                Data.Int.Int64, Data.Int.Int8,
    +                                                Data.Word.Word16,
    +                                                Data.Word.Word8, Double,
    +                                                Float] t) =>
    +        Tensor v1 t -- ^ __x__
    +        -> Tensor v2 t -- ^ __y__
    +        -> Tensor Value Bool -- ^ __z__
    +less x y | eqLengthGuard [] =
    +    buildOp (opDef "Less"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type: DT_BOOL }
    +-}
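+
+-- The following is a minimal usage sketch for the `less` wrapper, not part of
+-- the generated API: it assumes the `constant` helper from TensorFlow.Ops (in
+-- the companion tensorflow-ops package) and fetching to a `Data.Vector.Vector`
+-- via the `Fetchable` instances in TensorFlow.Nodes.
+--
+-- ```
+-- import Data.Vector (Vector)
+-- import qualified TensorFlow.GenOps.Core as CoreOps
+-- import qualified TensorFlow.Ops as TF (constant)  -- assumed helper
+-- import qualified TensorFlow.Session as TF
+-- import TensorFlow.Types (Shape (..))
+--
+-- main :: IO ()
+-- main = do
+--     let x = TF.constant (Shape [3]) [1, 5, 3 :: Float]
+--         y = TF.constant (Shape [3]) [2, 4, 3 :: Float]
+--     zs <- TF.runSession (TF.run (CoreOps.less x y))
+--     print (zs :: Vector Bool)  -- expected: [True, False, False]
+-- ```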
    +
    +-- | Computes the power of one value to another.
    +--
    +-- Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
    +-- corresponding elements in `x` and `y`. For example:
    +-- 
    +-- ```
+-- # tensor 'x' is [[2, 2], [3, 3]]
    +-- # tensor 'y' is [[8, 16], [2, 3]]
    +-- tf.pow(x, y) ==> [[256, 65536], [9, 27]]
    +-- ```
    +pow :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                               (Data.Complex.Complex Float),
    +                                               Data.Int.Int32, Data.Int.Int64,
    +                                               Data.Word.Word16, Double,
    +                                               Float] t) =>
    +       Tensor v1 t -- ^ __x__
    +       -> Tensor v2 t -- ^ __y__
    +       -> Tensor Value t -- ^ __z__
    +pow x y | eqLengthGuard [] =
    +    buildOp (opDef "Pow"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Compute the upper regularized incomplete Gamma function `Q(a, x)`.
    +--
    +-- The upper regularized incomplete Gamma function is defined as:
    +-- 
    +-- ```
    +-- Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)
    +-- ```
    +-- where
    +-- ```
    +-- Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt
    +-- ```
+-- is the upper incomplete Gamma function.
    +-- 
+-- Note that above, `P(a, x)` (`Igamma`) is the lower regularized incomplete
+-- Gamma function.
    +igammac :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
    +           Tensor v1 t -- ^ __a__
    +           -> Tensor v2 t -- ^ __x__
    +           -> Tensor Value t -- ^ __z__
    +igammac a x | eqLengthGuard [] =
    +    buildOp (opDef "Igammac"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        a x
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "a" type_attr: "T" }
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
+-- | Compute the lower regularized incomplete Gamma function `P(a, x)`.
    +--
    +-- The lower regularized incomplete Gamma function is defined as:
    +-- 
    +-- ```
    +-- P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)
    +-- ```
    +-- where
    +-- ```
    +-- gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt
    +-- ```
    +-- is the lower incomplete Gamma function.
    +-- 
+-- Note that above, `Q(a, x)` (`Igammac`) is the upper regularized incomplete
+-- Gamma function.
    +igamma :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
    +          Tensor v1 t -- ^ __a__
    +          -> Tensor v2 t -- ^ __x__
    +          -> Tensor Value t -- ^ __z__
    +igamma a x | eqLengthGuard [] =
    +    buildOp (opDef "Igamma"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        a x
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "a" type_attr: "T" }
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
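+
+-- A quick sanity-check sketch for the identity `P(a, x) + Q(a, x) = 1`, using
+-- `igamma` together with `igammac` above. It assumes the `constant` helper
+-- from TensorFlow.Ops (tensorflow-ops package) and an `add` wrapper generated
+-- elsewhere in this module.
+--
+-- ```
+-- import Data.Vector (Vector)
+-- import qualified TensorFlow.GenOps.Core as CoreOps
+-- import qualified TensorFlow.Ops as TF (constant)  -- assumed helper
+-- import qualified TensorFlow.Session as TF
+-- import TensorFlow.Types (Shape (..))
+--
+-- main :: IO ()
+-- main = do
+--     let a = TF.constant (Shape [2]) [0.5, 2.0 :: Float]
+--         x = TF.constant (Shape [2]) [1.0, 3.0 :: Float]
+--         s = CoreOps.add (CoreOps.igamma a x) (CoreOps.igammac a x)
+--     ones <- TF.runSession (TF.run s)
+--     print (ones :: Vector Float)  -- expected: [1.0, 1.0], up to rounding
+-- ```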
    +
    +-- | Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
    +--
    +-- The Hurwitz zeta function is defined as:
    +-- 
    +-- ```
    +-- \zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}
    +-- ```
    +zeta :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
    +        Tensor v1 t -- ^ __x__
    +        -> Tensor v2 t -- ^ __q__
    +        -> Tensor Value t -- ^ __z__
    +zeta x q | eqLengthGuard [] =
    +    buildOp (opDef "Zeta"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x q
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "q" type_attr: "T" }
    +output_arg { name: "z" type_attr: "T" }
    +-}
    +
    +-- | Returns the imaginary part of a complex number.
    +--
    +-- Given a tensor `input` of complex numbers, this operation returns a tensor of
    +-- type `float` that is the imaginary part of each element in `input`. All
+-- elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
+-- is the real part and *b* is the imaginary part, which this operation returns.
    +-- 
    +-- For example:
    +-- 
    +-- ```
    +-- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
    +-- tf.imag(input) ==> [4.75, 5.75]
    +-- ```
    +imag :: forall v1 t tout . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                                  (Data.Complex.Complex Float)] t,
    +                            TensorType tout, OneOf '[Double, Float] tout) =>
    +        Tensor v1 t -- ^ __input__
    +        -> Tensor Value tout -- ^ __output__
    +imag input | eqLengthGuard [] =
    +    buildOp (opDef "Imag"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tout" .~ tensorType (undefined :: tout))
    +        input
    +{-
    +attr {
    +  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
    +  default_value { type: DT_COMPLEX64 }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  default_value { type: DT_FLOAT }
    +  name: "Tout"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "Tout" }
    +-}
    +
    +-- | Converts two real numbers to a complex number.
    +--
    +-- Given a tensor `real` representing the real part of a complex number, and a
    +-- tensor `imag` representing the imaginary part of a complex number, this
    +-- operation returns complex numbers elementwise of the form \\(a + bj\\), where
    +-- *a* represents the `real` part and *b* represents the `imag` part.
    +-- 
    +-- The input tensors `real` and `imag` must have the same shape.
    +-- 
    +-- For example:
    +-- 
    +-- ```
    +-- # tensor 'real' is [2.25, 3.25]
    +-- # tensor `imag` is [4.75, 5.75]
    +-- tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
    +-- ```
    +complex :: forall v1 v2 t tout . (TensorType t, OneOf '[Double, Float] t,
    +                                  TensorType tout,
    +                                  OneOf '[(Data.Complex.Complex Double),
    +                                          (Data.Complex.Complex Float)] tout) =>
    +           Tensor v1 t -- ^ __real__
    +           -> Tensor v2 t -- ^ __imag__
    +           -> Tensor Value tout -- ^ __out__
    +complex real imag | eqLengthGuard [] =
    +    buildOp (opDef "Complex"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tout" .~ tensorType (undefined :: tout))
    +        real imag
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  default_value { type: DT_FLOAT }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
    +  default_value { type: DT_COMPLEX64 }
    +  name: "Tout"
    +  type: "type"
    +}
    +input_arg { name: "real" type_attr: "T" }
    +input_arg { name: "imag" type_attr: "T" }
    +output_arg { name: "out" type_attr: "Tout" }
    +-}
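+
+-- A round-trip sketch: build a complex tensor with `complex`, then recover the
+-- imaginary parts with `imag` (defined earlier in this module). Same
+-- assumptions as the `less` sketch (`constant` from TensorFlow.Ops, Vector
+-- fetching).
+--
+-- ```
+-- import Data.Complex (Complex)
+-- import Data.Vector (Vector)
+-- import qualified TensorFlow.GenOps.Core as CoreOps
+-- import qualified TensorFlow.Ops as TF (constant)  -- assumed helper
+-- import qualified TensorFlow.Session as TF
+-- import TensorFlow.Tensor (Tensor, Value)
+-- import TensorFlow.Types (Shape (..))
+--
+-- main :: IO ()
+-- main = do
+--     let re = TF.constant (Shape [2]) [2.25, 3.25 :: Float]
+--         im = TF.constant (Shape [2]) [4.75, 5.75 :: Float]
+--         z  = CoreOps.complex re im :: Tensor Value (Complex Float)
+--     imags <- TF.runSession (TF.run (CoreOps.imag z))
+--     print (imags :: Vector Float)  -- expected: [4.75, 5.75]
+-- ```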
    +
    +-- | Returns the truth value of (x != y) element-wise.
    +--
    +-- *NOTE*: `NotEqual` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +notEqual :: forall v1 v2 t . (TensorType t,
    +                              OneOf '[(Data.Complex.Complex Double),
    +                                      (Data.Complex.Complex Float), Bool,
    +                                      Data.ByteString.ByteString,
    +                                      Data.Int.Int16, Data.Int.Int32,
    +                                      Data.Int.Int64, Data.Int.Int8,
    +                                      Data.Word.Word16, Data.Word.Word8, Double,
    +                                      Float] t) => Tensor v1 t -- ^ __x__
    +            -> Tensor v2 t -- ^ __y__
    +            -> Tensor Value Bool -- ^ __z__
    +notEqual x y | eqLengthGuard [] =
    +    buildOp (opDef "NotEqual"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x y
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_COMPLEX64
    +      type: DT_QUINT8
    +      type: DT_QINT8
    +      type: DT_QINT32
    +      type: DT_STRING
    +      type: DT_BOOL
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "y" type_attr: "T" }
    +output_arg { name: "z" type: DT_BOOL }
    +-}
    +
    +-- | Computes the complex absolute value of a tensor.
    +--
    +-- Given a tensor `x` of complex numbers, this operation returns a tensor of type
    +-- `float` or `double` that is the absolute value of each element in `x`. All
    +-- elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
    +-- value is computed as \\( \sqrt{a^2 + b^2}\\).
    +-- 
    +-- For example:
    +-- 
    +-- ```
    +-- # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
    +-- tf.complex_abs(x) ==> [5.25594902, 6.60492229]
    +-- ```
    +complexAbs :: forall v1 t tout . (TensorType t,
    +                                  OneOf '[(Data.Complex.Complex Double),
    +                                          (Data.Complex.Complex Float)] t,
    +                                  TensorType tout, OneOf '[Double,
    +                                                           Float] tout) =>
    +              Tensor v1 t -- ^ __x__
    +              -> Tensor Value tout -- ^ __y__
    +complexAbs x | eqLengthGuard [] =
    +    buildOp (opDef "ComplexAbs"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tout" .~ tensorType (undefined :: tout))
    +        x
    +{-
    +attr {
    +  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
    +  default_value { type: DT_COMPLEX64 }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  default_value { type: DT_FLOAT }
    +  name: "Tout"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "Tout" }
    +-}
    +
    +-- | Returns the truth value of x AND y element-wise.
    +--
    +-- *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
    +-- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    +logicalAnd :: Tensor v1 Bool -- ^ __x__
    +              -> Tensor v2 Bool -- ^ __y__
    +              -> Tensor Value Bool -- ^ __z__
    +logicalAnd x y | eqLengthGuard [] =
    +    buildOp (opDef "LogicalAnd")
    +        x y
    +{-
    +input_arg { name: "x" type: DT_BOOL }
    +input_arg { name: "y" type: DT_BOOL }
    +output_arg { name: "z" type: DT_BOOL }
    +-}
    +
    +-- | 
    +
    +batchFFT :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
    +            -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
    +batchFFT input | eqLengthGuard [] =
    +    buildOp (opDef "BatchFFT")
    +        input
    +{-
    +input_arg { name: "input" type: DT_COMPLEX64 }
    +output_arg { name: "output" type: DT_COMPLEX64 }
    +-}
    +
    +-- | Selects elements from `t` or `e`, depending on `condition`.
    +--
+-- The `t` and `e` tensors must have the same shape,
    +-- and the output will also have that shape.  The `condition` tensor
    +-- must be a scalar if `t` and `e` are scalars.  If `t` and `e` are vectors
    +-- or higher rank, then `condition` must be either a vector with size
    +-- matching the first dimension of `t`, or must have the same shape as `t`.
    +-- 
    +-- The `condition` tensor acts as a mask that chooses, based on the value at each
    +-- element, whether the corresponding element / row in the output should be
    +-- taken from `t` (if true) or `e` (if false).
    +-- 
    +-- If `condition` is a vector and `t` and `e` are higher rank matrices, then
    +-- it chooses which row (outer dimension) to copy from `t` and `e`.
    +-- If `condition` has the same shape as `t` and `e`, then it chooses which
    +-- element to copy from `t` and `e`.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- # 'condition' tensor is [[True,  False]
    +-- #                        [False, True]]
    +-- # 't' is [[1, 2],
    +-- #         [3, 4]]
    +-- # 'e' is [[5, 6],
    +-- #         [7, 8]]
    +-- select(condition, t, e) ==> [[1, 6],
    +--                              [7, 4]]
    +-- 
    +-- 
    +-- # 'condition' tensor is [True, False]
    +-- # 't' is [[1, 2],
    +-- #         [3, 4]]
    +-- # 'e' is [[5, 6],
    +-- #         [7, 8]]
    +-- select(condition, t, e) ==> [[1, 2],
    +--                              [7, 8]]
    +-- 
    +-- ```
    +select :: forall v1 v2 v3 t . (TensorType t) =>
    +          Tensor v1 Bool -- ^ __condition__
    +          -> Tensor v2 t -- ^ __t__: = A `Tensor` which may have the same shape as `condition`.
    +                         -- If `condition` is rank 1, `t` may have higher rank,
    +                         -- but its first dimension must match the size of `condition`.
    +          -> Tensor v3 t -- ^ __e__: = A `Tensor` with the same type and shape as `t`.
    +          -> Tensor Value t -- ^ __output__: = A `Tensor` with the same type and shape as `t` and `e`.
    +select condition t e | eqLengthGuard [] =
    +    buildOp (opDef "Select"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        condition t e
    +{-
    +attr { name: "T" type: "type" }
    +input_arg { name: "condition" type: DT_BOOL }
    +input_arg {
    +  description: "= A `Tensor` which may have the same shape as `condition`.\nIf `condition` is rank 1, `t` may have higher rank,\nbut its first dimension must match the size of `condition`."
    +  name: "t"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "= A `Tensor` with the same type and shape as `t`."
    +  name: "e"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "= A `Tensor` with the same type and shape as `t` and `e`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
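+
+-- A sketch of the first documented example above, under the same assumptions
+-- as the earlier sketches (`constant` from TensorFlow.Ops, Vector fetching);
+-- fetched values come back in row-major order.
+--
+-- ```
+-- import Data.Vector (Vector)
+-- import qualified TensorFlow.GenOps.Core as CoreOps
+-- import qualified TensorFlow.Ops as TF (constant)  -- assumed helper
+-- import qualified TensorFlow.Session as TF
+-- import TensorFlow.Types (Shape (..))
+--
+-- main :: IO ()
+-- main = do
+--     let cond = TF.constant (Shape [2, 2]) [True, False, False, True]
+--         t    = TF.constant (Shape [2, 2]) [1, 2, 3, 4 :: Float]
+--         e    = TF.constant (Shape [2, 2]) [5, 6, 7, 8 :: Float]
+--     out <- TF.runSession (TF.run (CoreOps.select cond t e))
+--     print (out :: Vector Float)  -- expected (row-major): [1, 6, 7, 4]
+-- ```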
    +
    +-- | Multiply the matrix "a" by the matrix "b".
    +--
    +-- The inputs must be two-dimensional matrices and the inner dimension of
    +-- "a" (after being transposed if transpose_a is true) must match the
    +-- outer dimension of "b" (after being transposed if transposed_b is
    +-- true).
    +-- 
    +-- *Note*: The default kernel implementation for MatMul on GPUs uses
    +-- cublas.
    +matMul :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                                  (Data.Complex.Complex Float),
    +                                                  Data.Int.Int32,
    +                                                  Data.Word.Word16, Double,
    +                                                  Float] t) =>
    +          Tensor v1 t -- ^ __a__
    +          -> Tensor v2 t -- ^ __b__
    +          -> Tensor Value t -- ^ __product__
    +matMul a b | eqLengthGuard [] =
    +    buildOp (opDef "MatMul"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        a b
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, \"a\" is transposed before multiplication."
    +  name: "transpose_a"
    +  type: "bool"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, \"b\" is transposed before multiplication."
    +  name: "transpose_b"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "a" type_attr: "T" }
    +input_arg { name: "b" type_attr: "T" }
    +output_arg { name: "product" type_attr: "T" }
    +-}
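+
+-- Note that this generated wrapper exposes neither `transpose_a` nor
+-- `transpose_b`, so both default to false. A sketch under the same
+-- assumptions as the earlier examples:
+--
+-- ```
+-- import Data.Vector (Vector)
+-- import qualified TensorFlow.GenOps.Core as CoreOps
+-- import qualified TensorFlow.Ops as TF (constant)  -- assumed helper
+-- import qualified TensorFlow.Session as TF
+-- import TensorFlow.Types (Shape (..))
+--
+-- main :: IO ()
+-- main = do
+--     let a = TF.constant (Shape [2, 2]) [1, 2, 3, 4 :: Float]
+--         b = TF.constant (Shape [2, 2]) [5, 6, 7, 8 :: Float]
+--     p <- TF.runSession (TF.run (CoreOps.matMul a b))
+--     print (p :: Vector Float)  -- expected (row-major): [19, 22, 43, 50]
+-- ```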
    +
+-- | Computes Psi, the derivative of Lgamma (the log of the absolute value of
+-- `Gamma(x)`), element-wise.
    +digamma :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                                Float] t) =>
    +           Tensor v1 t -- ^ __x__
    +           -> Tensor Value t -- ^ __y__
    +digamma x | eqLengthGuard [] =
    +    buildOp (opDef "Digamma"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | Computes the gradients of convolution with respect to the filter.
    +
    +conv2DBackpropFilter :: forall v1 v2 v3 t . (TensorType t,
    +                                             OneOf '[Data.Word.Word16, Double,
    +                                                     Float] t) =>
    +                        Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
    +                        -> Tensor v2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
    +                                                    -- where `filter` is a 4-D
    +                                                    -- `[filter_height, filter_width, in_channels, out_channels]` tensor.
    +                        -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
    +                                       -- Gradients w.r.t. the output of the convolution.
    +                        -> Tensor Value t -- ^ __output__: 4-D with shape
    +                        -- `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
    +                        -- the `filter` input of the convolution.
    +conv2DBackpropFilter input filter_sizes out_backprop | eqLengthGuard [] =
    +    buildOp (opDef "Conv2DBackpropFilter"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input filter_sizes out_backprop
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat."
    +  name: "strides"
    +  type: "list(int)"
    +}
    +attr {
    +  default_value { b: true } name: "use_cudnn_on_gpu" type: "bool"
    +}
    +attr {
    +  allowed_values { list { s: "SAME" s: "VALID" } }
    +  description: "The type of padding algorithm to use."
    +  name: "padding"
    +  type: "string"
    +}
    +attr {
    +  allowed_values { list { s: "NHWC" s: "NCHW" } }
    +  default_value { s: "NHWC" }
    +  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    +  name: "data_format"
    +  type: "string"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
    +  name: "input"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, out_channels]` tensor."
    +  name: "filter_sizes"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
    +  name: "out_backprop"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.\nthe `filter` input of the convolution."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the minimum of elements across dimensions of a tensor.
    +--
    +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
    +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
    +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
    +-- retained with length 1.
    +min :: forall v1 v2 t tidx . (TensorType t,
    +                              OneOf '[(Data.Complex.Complex Double),
    +                                      (Data.Complex.Complex Float),
    +                                      Data.Int.Int16, Data.Int.Int32,
    +                                      Data.Int.Int64, Data.Int.Int8,
    +                                      Data.Word.Word16, Data.Word.Word8, Double,
    +                                      Float] t, TensorType tidx,
    +                              OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
    +       Tensor v1 t -- ^ __input__: The tensor to reduce.
    +       -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
    +       -> Tensor Value t -- ^ __output__: The reduced tensor.
    +min input reduction_indices | eqLengthGuard [] =
    +    buildOp (opDef "Min"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        input reduction_indices
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, retain reduced dimensions with length 1."
    +  name: "keep_dims"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The tensor to reduce." name: "input" type_attr: "T"
    +}
    +input_arg {
    +  description: "The dimensions to reduce."
    +  name: "reduction_indices"
    +  type_attr: "Tidx"
    +}
    +output_arg {
    +  description: "The reduced tensor." name: "output" type_attr: "T"
    +}
    +-}
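+
+-- A reduction sketch: take row minima of a 2x3 tensor by reducing over
+-- dimension 1. Same assumptions as the earlier sketches; note this wrapper
+-- does not expose `keep_dims`, so it defaults to false.
+--
+-- ```
+-- import Data.Int (Int32)
+-- import Data.Vector (Vector)
+-- import qualified TensorFlow.GenOps.Core as CoreOps
+-- import qualified TensorFlow.Ops as TF (constant)  -- assumed helper
+-- import qualified TensorFlow.Session as TF
+-- import TensorFlow.Types (Shape (..))
+--
+-- main :: IO ()
+-- main = do
+--     let x    = TF.constant (Shape [2, 3]) [3, 1, 4, 1, 5, 9 :: Float]
+--         axes = TF.constant (Shape [1]) [1 :: Int32]
+--     m <- TF.runSession (TF.run (CoreOps.min x axes))
+--     print (m :: Vector Float)  -- expected: [1.0, 1.0] (row minima)
+-- ```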
    +
    +-- | Returns which elements of x are finite.
    +
    +isFinite :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                                 Float] t) =>
    +            Tensor v1 t -- ^ __x__
    +            -> Tensor Value Bool -- ^ __y__
    +isFinite x | eqLengthGuard [] =
    +    buildOp (opDef "IsFinite"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type: DT_BOOL }
    +-}
    +
    +-- | Returns the index with the largest value across dimensions of a tensor.
    +
    +argMax :: forall v1 v2 t tidx . (TensorType t,
    +                                 OneOf '[(Data.Complex.Complex Double),
    +                                         (Data.Complex.Complex Float),
    +                                         Data.Int.Int16, Data.Int.Int32,
    +                                         Data.Int.Int64, Data.Int.Int8,
    +                                         Data.Word.Word16, Data.Word.Word8,
    +                                         Double, Float] t, TensorType tidx,
    +                                 OneOf '[Data.Int.Int32,
    +                                         Data.Int.Int64] tidx) =>
    +          Tensor v1 t -- ^ __input__
    +          -> Tensor v2 tidx -- ^ __dimension__: int32, 0 <= dimension < rank(input).  Describes which dimension
    +                            -- of the input Tensor to reduce across. For vectors, use dimension = 0.
    +          -> Tensor Value Data.Int.Int64 -- ^ __output__
    +argMax input dimension | eqLengthGuard [] =
    +    buildOp (opDef "ArgMax"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        input dimension
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +input_arg {
    +  description: "int32, 0 <= dimension < rank(input).  Describes which dimension\nof the input Tensor to reduce across. For vectors, use dimension = 0."
    +  name: "dimension"
    +  type_attr: "Tidx"
    +}
    +output_arg { name: "output" type: DT_INT64 }
    +-}
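+
+-- A sketch finding the per-row index of the maximum in a 2x3 tensor (the
+-- dimension is given as a scalar tensor), under the usual assumptions:
+--
+-- ```
+-- import Data.Int (Int32, Int64)
+-- import Data.Vector (Vector)
+-- import qualified TensorFlow.GenOps.Core as CoreOps
+-- import qualified TensorFlow.Ops as TF (constant)  -- assumed helper
+-- import qualified TensorFlow.Session as TF
+-- import TensorFlow.Types (Shape (..))
+--
+-- main :: IO ()
+-- main = do
+--     let x   = TF.constant (Shape [2, 3]) [3, 1, 4, 1, 5, 9 :: Float]
+--         dim = TF.constant (Shape []) [1 :: Int32]
+--     idx <- TF.runSession (TF.run (CoreOps.argMax x dim))
+--     print (idx :: Vector Int64)  -- expected: [2, 2]
+-- ```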
    +
    +-- | Computes the mean along segments of a tensor.
    +--
    +-- Read [the section on
    +-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
    +-- of segments.
    +-- 
    +-- Computes a tensor such that
    +-- \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is
    +-- over `j` such that `segment_ids[j] == i` and `N` is the total number of
    +-- values summed.
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/SegmentMean.png" alt>
    +-- </div>
    +segmentMean :: forall v1 v2 t tindices . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                                Data.Int.Int32,
    +                                                                Data.Int.Int64,
    +                                                                Data.Int.Int8,
    +                                                                Data.Word.Word16,
    +                                                                Data.Word.Word8,
    +                                                                Double,
    +                                                                Float] t,
    +                                          TensorType tindices,
    +                                          OneOf '[Data.Int.Int32,
    +                                                  Data.Int.Int64] tindices) =>
    +               Tensor v1 t -- ^ __data__
    +               -> Tensor v2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
    +                                     -- first dimension.  Values should be sorted and can be repeated.
    +               -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
    +               -- has size `k`, the number of segments.
    +segmentMean data' segment_ids | eqLengthGuard [] =
    +    buildOp (opDef "SegmentMean"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        data' segment_ids
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +input_arg { name: "data" type_attr: "T" }
    +input_arg {
    +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
    +  name: "segment_ids"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Compute the cumulative product of the tensor `x` along `axis`.
    +--
    +-- By default, this op performs an inclusive cumprod, which means that the first
    +-- element of the input is identical to the first element of the output:
    +-- ```prettyprint
    +-- tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
    +-- ```
    +-- 
    +-- By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
    +-- performed instead:
    +-- ```prettyprint
+-- tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
    +-- ```
    +-- 
    +-- By setting the `reverse` kwarg to `True`, the cumprod is performed in the
    +-- opposite direction:
    +-- ```prettyprint
    +-- tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
    +-- ```
    +-- This is more efficient than using separate `tf.reverse` ops.
    +-- 
    +-- The `reverse` and `exclusive` kwargs can also be combined:
    +-- ```prettyprint
+-- tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
    +-- ```
    +cumprod :: forall v1 v2 t tidx . (TensorType t,
    +                                  OneOf '[(Data.Complex.Complex Double),
    +                                          (Data.Complex.Complex Float),
    +                                          Data.Int.Int16, Data.Int.Int32,
    +                                          Data.Int.Int64, Data.Int.Int8,
    +                                          Data.Word.Word16, Data.Word.Word8,
    +                                          Double, Float] t, TensorType tidx,
    +                                  OneOf '[Data.Int.Int32,
    +                                          Data.Int.Int64] tidx) =>
    +           Tensor v1 t -- ^ __x__
    +           -> Tensor v2 tidx -- ^ __axis__
    +           -> Tensor Value t -- ^ __out__
    +cumprod x axis | eqLengthGuard [] =
    +    buildOp (opDef "Cumprod"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        x axis
    +{-
    +attr { default_value { b: false } name: "exclusive" type: "bool" }
    +attr { default_value { b: false } name: "reverse" type: "bool" }
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "axis" type_attr: "Tidx" }
    +output_arg { name: "out" type_attr: "T" }
    +-}
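+
+-- A sketch of the inclusive cumprod along axis 0 (this wrapper exposes
+-- neither `exclusive` nor `reverse`, so both default to false). Same
+-- assumptions as the earlier sketches:
+--
+-- ```
+-- import Data.Int (Int32)
+-- import Data.Vector (Vector)
+-- import qualified TensorFlow.GenOps.Core as CoreOps
+-- import qualified TensorFlow.Ops as TF (constant)  -- assumed helper
+-- import qualified TensorFlow.Session as TF
+-- import TensorFlow.Types (Shape (..))
+--
+-- main :: IO ()
+-- main = do
+--     let x    = TF.constant (Shape [3]) [2, 3, 4 :: Float]
+--         axis = TF.constant (Shape []) [0 :: Int32]
+--     c <- TF.runSession (TF.run (CoreOps.cumprod x axis))
+--     print (c :: Vector Float)  -- expected: [2.0, 6.0, 24.0]
+-- ```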
    +
    +-- | Computes the minimum along segments of a tensor.
    +--
    +-- Read [the section on
    +-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
    +-- of segments.
    +-- 
    +-- Computes a tensor such that
    +-- \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
    +-- that `segment_ids[j] == i`.
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/SegmentMin.png" alt>
    +-- </div>
    +segmentMin :: forall v1 v2 t tindices . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                               Data.Int.Int32,
    +                                                               Data.Int.Int64,
    +                                                               Data.Int.Int8,
    +                                                               Data.Word.Word16,
    +                                                               Data.Word.Word8,
    +                                                               Double, Float] t,
    +                                         TensorType tindices,
    +                                         OneOf '[Data.Int.Int32,
    +                                                 Data.Int.Int64] tindices) =>
    +              Tensor v1 t -- ^ __data__
    +              -> Tensor v2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
    +                                    -- first dimension.  Values should be sorted and can be repeated.
    +              -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
    +              -- has size `k`, the number of segments.
    +segmentMin data' segment_ids | eqLengthGuard [] =
    +    buildOp (opDef "SegmentMin"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        data' segment_ids
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +input_arg { name: "data" type_attr: "T" }
    +input_arg {
    +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
    +  name: "segment_ids"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the sum along segments of a tensor.
    +--
    +-- Read [the section on
    +-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
    +-- of segments.
    +-- 
    +-- Computes a tensor such that
+-- `output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
    +-- that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
    +-- need not be sorted and need not cover all values in the full
    +-- range of valid values.
    +-- 
    +-- If the sum is empty for a given segment ID `i`, `output[i] = 0`.
    +-- 
    +-- `num_segments` should equal the number of distinct segment IDs.
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/UnsortedSegmentSum.png" alt>
    +-- </div>
    +unsortedSegmentSum :: forall v1 v2 v3 t tindices . (TensorType t,
    +                                                    OneOf '[(Data.Complex.Complex Double),
    +                                                            (Data.Complex.Complex Float),
    +                                                            Data.Int.Int16,
    +                                                            Data.Int.Int32,
    +                                                            Data.Int.Int64,
    +                                                            Data.Int.Int8,
    +                                                            Data.Word.Word16,
    +                                                            Data.Word.Word8,
    +                                                            Double, Float] t,
    +                                                    TensorType tindices,
    +                                                    OneOf '[Data.Int.Int32,
    +                                                            Data.Int.Int64] tindices) =>
    +                      Tensor v1 t -- ^ __data__
    +                      -> Tensor v2 tindices -- ^ __segment_ids__: A tensor whose shape is a prefix of `data.shape`.
    +                      -> Tensor v3 Data.Int.Int32 -- ^ __num_segments__
    +                      -> Tensor Value t -- ^ __output__: Has same shape as data, except for the first `segment_ids.rank`
    +                      -- dimensions, which are replaced with a single dimension which has size
    +                      -- `num_segments`.
    +unsortedSegmentSum data' segment_ids num_segments | eqLengthGuard [] =
    +    buildOp (opDef "UnsortedSegmentSum"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        data' segment_ids num_segments
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +input_arg { name: "data" type_attr: "T" }
    +input_arg {
    +  description: "A tensor whose shape is a prefix of `data.shape`."
    +  name: "segment_ids"
    +  type_attr: "Tindices"
    +}
    +input_arg { name: "num_segments" type: DT_INT32 }
    +output_arg {
    +  description: "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
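+
+-- A sketch summing four values into two interleaved segments; `num_segments`
+-- is a scalar tensor. Same assumptions as the earlier sketches:
+--
+-- ```
+-- import Data.Int (Int32)
+-- import Data.Vector (Vector)
+-- import qualified TensorFlow.GenOps.Core as CoreOps
+-- import qualified TensorFlow.Ops as TF (constant)  -- assumed helper
+-- import qualified TensorFlow.Session as TF
+-- import TensorFlow.Types (Shape (..))
+--
+-- main :: IO ()
+-- main = do
+--     let xs  = TF.constant (Shape [4]) [1, 2, 3, 4 :: Float]
+--         ids = TF.constant (Shape [4]) [0, 1, 0, 1 :: Int32]
+--         n   = TF.constant (Shape []) [2 :: Int32]
+--     s <- TF.runSession (TF.run (CoreOps.unsortedSegmentSum xs ids n))
+--     print (s :: Vector Float)  -- expected: [4.0, 6.0]
+-- ```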
    +
    +-- | A Reader that outputs the records from a TensorFlow Records file.
    +
    +tFRecordReader :: Tensor Value Data.ByteString.ByteString -- ^ __reader_handle__: The handle to reference the Reader.
    +tFRecordReader  | eqLengthGuard [] =
    +    buildOp (opDef "TFRecordReader")
    +        
    +{-
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
    +  name: "shared_name"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" } name: "compression_type" type: "string"
    +}
    +output_arg {
    +  description: "The handle to reference the Reader."
    +  is_ref: true
    +  name: "reader_handle"
    +  type: DT_STRING
    +}
    +-}
    +
    +-- | Computes the sum along sparse segments of a tensor.
    +--
    +-- Read [the section on
    +-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
    +-- of segments.
    +-- 
    +-- Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
    +-- dimension, selecting a subset of dimension 0, specified by `indices`.
    +-- 
    +-- For example:
    +-- 
    +-- ```prettyprint
    +-- c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
    +-- 
    +-- # Select two rows, one segment.
    +-- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
    +--   ==> [[0 0 0 0]]
    +-- 
+-- # Select two rows, two segments.
    +-- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
    +--   ==> [[ 1  2  3  4]
    +--        [-1 -2 -3 -4]]
    +-- 
    +-- # Select all rows, two segments.
    +-- tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
    +--   ==> [[0 0 0 0]
    +--        [5 6 7 8]]
    +-- 
    +-- # Which is equivalent to:
    +-- tf.segment_sum(c, tf.constant([0, 0, 1]))
    +-- ```
    +sparseSegmentSum :: forall v1 v2 v3 t tidx . (TensorType t,
    +                                              OneOf '[Data.Int.Int16,
    +                                                      Data.Int.Int32,
    +                                                      Data.Int.Int64,
    +                                                      Data.Int.Int8,
    +                                                      Data.Word.Word16,
    +                                                      Data.Word.Word8, Double,
    +                                                      Float] t, TensorType tidx,
    +                                              OneOf '[Data.Int.Int32,
    +                                                      Data.Int.Int64] tidx) =>
    +                    Tensor v1 t -- ^ __data__
    +                    -> Tensor v2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
    +                    -> Tensor v3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
    +                    -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
    +                    -- has size `k`, the number of segments.
    +sparseSegmentSum data' indices segment_ids | eqLengthGuard [] =
    +    buildOp (opDef "SparseSegmentSum"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        data' indices segment_ids
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg { name: "data" type_attr: "T" }
    +input_arg {
    +  description: "A 1-D tensor. Has same rank as `segment_ids`."
    +  name: "indices"
    +  type_attr: "Tidx"
    +}
    +input_arg {
    +  description: "A 1-D tensor. Values should be sorted and can be repeated."
    +  name: "segment_ids"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Computes the sum along sparse segments of a tensor divided by the sqrt of N.
    +--
    +-- N is the size of the segment being reduced.
    +-- 
    +-- Read [the section on
    +-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
    +-- of segments.
    +sparseSegmentSqrtN :: forall v1 v2 v3 t tidx . (TensorType t, OneOf '[Double,
    +                                                                      Float] t,
    +                                                TensorType tidx,
    +                                                OneOf '[Data.Int.Int32,
    +                                                        Data.Int.Int64] tidx) =>
    +                      Tensor v1 t -- ^ __data__
    +                      -> Tensor v2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
    +                      -> Tensor v3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
    +                      -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
    +                      -- has size `k`, the number of segments.
    +sparseSegmentSqrtN data' indices segment_ids | eqLengthGuard [] =
    +    buildOp (opDef "SparseSegmentSqrtN"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        data' indices segment_ids
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg { name: "data" type_attr: "T" }
    +input_arg {
    +  description: "A 1-D tensor. Has same rank as `segment_ids`."
    +  name: "indices"
    +  type_attr: "Tidx"
    +}
    +input_arg {
    +  description: "A 1-D tensor. Values should be sorted and can be repeated."
    +  name: "segment_ids"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Copy Host Op.
    +--
+-- Performs CPU-to-CPU deep-copying of a tensor.
+-- 
+-- Unlike the Copy Op, this op has a HostMemory constraint on its input or output.
    +copyHost :: forall v1 t . (TensorType t) =>
    +            Tensor v1 t -- ^ __input__: Input tensor.
    +            -> Tensor Value t -- ^ __output__: Output tensor, deep-copied from input.
    +copyHost input | eqLengthGuard [] =
    +    buildOp (opDef "CopyHost"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  default_value { s: "" }
    +  description: "The name of the input tensor."
    +  name: "tensor_name"
    +  type: "string"
    +}
    +input_arg {
    +  description: "Input tensor." name: "input" type_attr: "T"
    +}
    +output_arg {
    +  description: "Output tensor, deep-copied from input."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Holds state in the form of a tensor that persists across steps.
    +--
    +-- Outputs a ref to the tensor state so it may be read or modified.
+-- TODO(zhifengc/mrry): Add a pointer to a more detailed document
    +-- about sharing states in tensorflow.
    +variable :: forall dtype . (TensorType dtype) =>
    +            Tensor Value dtype -- ^ __ref__: A reference to the variable tensor.
    +variable  | eqLengthGuard [] =
    +    buildOp (opDef "Variable"
    +             & opAttr "dtype" .~ tensorType (undefined :: dtype))
    +        
    +{-
    +attr {
    +  description: "The shape of the variable tensor."
    +  name: "shape"
    +  type: "shape"
    +}
    +attr {
    +  description: "The type of elements in the variable tensor."
    +  name: "dtype"
    +  type: "type"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this variable is placed in the given container.\nOtherwise, a default container is used."
    +  name: "container"
    +  type: "string"
    +}
    +attr {
    +  default_value { s: "" }
    +  description: "If non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
    +  name: "shared_name"
    +  type: "string"
    +}
    +output_arg {
    +  description: "A reference to the variable tensor."
    +  is_ref: true
    +  name: "ref"
    +  type_attr: "dtype"
    +}
    +-}
    +
    +-- | Computes gradients for SparseSegmentSqrtN.
    +--
    +-- Returns tensor "output" with same shape as grad, except for dimension 0 whose
    +-- value is output_dim0.
    +sparseSegmentSqrtNGrad :: forall v1 v2 v3 v4 t tidx . (TensorType t,
    +                                                       OneOf '[Double, Float] t,
    +                                                       TensorType tidx,
    +                                                       OneOf '[Data.Int.Int32,
    +                                                               Data.Int.Int64] tidx) =>
    +                          Tensor v1 t -- ^ __grad__: gradient propagated to the SparseSegmentSqrtN op.
    +                          -> Tensor v2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentSqrtN op.
    +                          -> Tensor v3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentSqrtN op.
    +                          -> Tensor v4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentSqrtN op.
    +                          -> Tensor Value t -- ^ __output__
    +sparseSegmentSqrtNGrad grad indices segment_ids output_dim0 | eqLengthGuard [] =
    +    buildOp (opDef "SparseSegmentSqrtNGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        grad indices segment_ids output_dim0
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "gradient propagated to the SparseSegmentSqrtN op."
    +  name: "grad"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "indices passed to the corresponding SparseSegmentSqrtN op."
    +  name: "indices"
    +  type_attr: "Tidx"
    +}
    +input_arg {
    +  description: "segment_ids passed to the corresponding SparseSegmentSqrtN op."
    +  name: "segment_ids"
    +  type: DT_INT32
    +}
    +input_arg {
    +  description: "dimension 0 of \"data\" passed to SparseSegmentSqrtN op."
    +  name: "output_dim0"
    +  type: DT_INT32
    +}
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Creates a sequence of integers.
    +--
    +-- This operation creates a sequence of integers that begins at `start` and
    +-- extends by increments of `delta` up to but not including `limit`.
    +-- 
    +-- For example:
    +-- 
    +-- ```
    +-- # 'start' is 3
    +-- # 'limit' is 18
    +-- # 'delta' is 3
    +-- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
    +-- ```
    +range :: forall v1 v2 v3 tidx . (TensorType tidx, OneOf '[Data.Int.Int32,
    +                                                          Data.Int.Int64] tidx) =>
    +         Tensor v1 tidx -- ^ __start__: 0-D (scalar). First entry in the sequence.
    +         -> Tensor v2 tidx -- ^ __limit__: 0-D (scalar). Upper limit of sequence, exclusive.
    +         -> Tensor v3 tidx -- ^ __delta__: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
    +         -> Tensor Value tidx -- ^ __output__: 1-D.
    +range start limit delta | eqLengthGuard [] =
    +    buildOp (opDef "Range"
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        start limit delta
    +{-
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "0-D (scalar). First entry in the sequence."
    +  name: "start"
    +  type_attr: "Tidx"
    +}
    +input_arg {
    +  description: "0-D (scalar). Upper limit of sequence, exclusive."
    +  name: "limit"
    +  type_attr: "Tidx"
    +}
    +input_arg {
    +  description: "0-D (scalar). Optional. Default is 1. Number that increments `start`."
    +  name: "delta"
    +  type_attr: "Tidx"
    +}
    +output_arg { description: "1-D." name: "output" type_attr: "Tidx" }
    +-}
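+
+-- A sketch reproducing the documented example (all three arguments are
+-- scalar tensors), under the usual assumptions:
+--
+-- ```
+-- import Data.Int (Int32)
+-- import Data.Vector (Vector)
+-- import qualified TensorFlow.GenOps.Core as CoreOps
+-- import qualified TensorFlow.Ops as TF (constant)  -- assumed helper
+-- import qualified TensorFlow.Session as TF
+-- import TensorFlow.Types (Shape (..))
+--
+-- main :: IO ()
+-- main = do
+--     let start = TF.constant (Shape []) [3 :: Int32]
+--         limit = TF.constant (Shape []) [18 :: Int32]
+--         delta = TF.constant (Shape []) [3 :: Int32]
+--     r <- TF.runSession (TF.run (CoreOps.range start limit delta))
+--     print (r :: Vector Int32)  -- expected: [3, 6, 9, 12, 15]
+-- ```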
    +
    +-- | Computes the "logical or" of elements across dimensions of a tensor.
    +--
    +-- Reduces `input` along the dimensions given in `reduction_indices`. Unless
    +-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
    +-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
    +-- retained with length 1.
    +any :: forall v1 v2 tidx . (TensorType tidx, OneOf '[Data.Int.Int32,
    +                                                     Data.Int.Int64] tidx) =>
    +       Tensor v1 Bool -- ^ __input__: The tensor to reduce.
    +       -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
    +       -> Tensor Value Bool -- ^ __output__: The reduced tensor.
    +any input reduction_indices | eqLengthGuard [] =
    +    buildOp (opDef "Any"
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        input reduction_indices
    +{-
    +attr {
    +  default_value { b: false }
    +  description: "If true, retain reduced dimensions with length 1."
    +  name: "keep_dims"
    +  type: "bool"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "The tensor to reduce." name: "input" type: DT_BOOL
    +}
    +input_arg {
    +  description: "The dimensions to reduce."
    +  name: "reduction_indices"
    +  type_attr: "Tidx"
    +}
    +output_arg {
    +  description: "The reduced tensor." name: "output" type: DT_BOOL
    +}
    +-}
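+
+-- A hypothetical sketch of reducing along dimension 0; 'constant' and
+-- 'scalar' are assumed from the companion tensorflow-ops package, and the
+-- qualified import matters because this 'any' shadows Prelude.any:
+--
+-- > -- Reducing [False, True, False] over axis 0 should yield True.
+-- > anyExample = Ops.any (constant (Shape [3]) [False, True, False])
+-- >                      (scalar (0 :: Data.Int.Int32))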
    +
    +-- | Generates values in an interval.
    +--
    +-- A sequence of `num` evenly-spaced values are generated beginning at `start`.
+-- If `num > 1`, the values in the sequence increase by
+-- `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
    +-- 
    +-- For example:
    +-- 
    +-- ```
    +-- tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
    +-- ```
    +linSpace :: forall v1 v2 v3 t tidx . (TensorType t, OneOf '[Double, Float] t,
    +                                      TensorType tidx, OneOf '[Data.Int.Int32,
    +                                                               Data.Int.Int64] tidx) =>
    +            Tensor v1 t -- ^ __start__: First entry in the range.
    +            -> Tensor v2 t -- ^ __stop__: Last entry in the range.
    +            -> Tensor v3 tidx -- ^ __num__: Number of values to generate.
    +            -> Tensor Value t -- ^ __output__: 1-D. The generated values.
    +linSpace start stop num | eqLengthGuard [] =
    +    buildOp (opDef "LinSpace"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        start stop num
    +{-
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg {
    +  description: "First entry in the range."
    +  name: "start"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Last entry in the range." name: "stop" type_attr: "T"
    +}
    +input_arg {
    +  description: "Number of values to generate."
    +  name: "num"
    +  type_attr: "Tidx"
    +}
    +output_arg {
    +  description: "1-D. The generated values."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
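+
+-- In the example above the spacing is (12.0 - 10.0) / (3 - 1) = 1.0, which
+-- is why the generated values come out as [10.0, 11.0, 12.0].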
    +
    +-- | Resize `images` to `size` using area interpolation.
    +--
    +-- Input images can be of different types but output images are always float.
    +resizeArea :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
    +                                                      Data.Int.Int32,
    +                                                      Data.Int.Int64,
    +                                                      Data.Int.Int8,
    +                                                      Data.Word.Word16,
    +                                                      Data.Word.Word8, Double,
    +                                                      Float] t) =>
    +              Tensor v1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
    +              -> Tensor v2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
    +                                          -- new size for the images.
    +              -> Tensor Value Float -- ^ __resized_images__: 4-D with shape
    +              -- `[batch, new_height, new_width, channels]`.
    +resizeArea images size | eqLengthGuard [] =
    +    buildOp (opDef "ResizeArea"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        images size
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_UINT8
    +      type: DT_INT8
    +      type: DT_INT16
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_HALF
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  default_value { b: false }
    +  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
    +  name: "align_corners"
    +  type: "bool"
    +}
    +input_arg {
    +  description: "4-D with shape `[batch, height, width, channels]`."
    +  name: "images"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
    +  name: "size"
    +  type: DT_INT32
    +}
    +output_arg {
    +  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
    +  name: "resized_images"
    +  type: DT_FLOAT
    +}
    +-}
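+
+-- Note: the optional `align_corners` attr in the op definition above is not
+-- surfaced as a parameter of this wrapper, so it keeps its default (false).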
    +
    +-- | Returns the real part of a complex number.
    +--
    +-- Given a tensor `input` of complex numbers, this operation returns a tensor of
    +-- type `float` that is the real part of each element in `input`. All elements in
    +-- `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
    +--  part returned by this operation and *b* is the imaginary part.
    +-- 
    +-- For example:
    +-- 
    +-- ```
    +-- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
    +-- tf.real(input) ==> [-2.25, 3.25]
    +-- ```
    +real :: forall v1 t tout . (TensorType t, OneOf '[(Data.Complex.Complex Double),
    +                                                  (Data.Complex.Complex Float)] t,
    +                            TensorType tout, OneOf '[Double, Float] tout) =>
    +        Tensor v1 t -- ^ __input__
    +        -> Tensor Value tout -- ^ __output__
    +real input | eqLengthGuard [] =
    +    buildOp (opDef "Real"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tout" .~ tensorType (undefined :: tout))
    +        input
    +{-
    +attr {
    +  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
    +  default_value { type: DT_COMPLEX64 }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
    +  default_value { type: DT_FLOAT }
    +  name: "Tout"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "Tout" }
    +-}
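+
+-- Note: nothing in this signature ties `tout` to the component type of `t`
+-- (e.g. `t ~ Complex Double` with `tout ~ Float` type-checks); whether a
+-- given pairing is actually supported is decided by the runtime kernel, so
+-- callers should pair them deliberately.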
    +
    +-- | Compute the inverse 1-dimensional discrete Fourier Transform over the inner-most
    +--
    +-- dimension of `input`.
    +iFFT :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
    +        -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most
    +        -- dimension of `input` is replaced with its inverse 1D Fourier Transform.
    +iFFT input | eqLengthGuard [] =
    +    buildOp (opDef "IFFT")
    +        input
    +{-
    +input_arg {
    +  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
    +}
    +output_arg {
    +  description: "A complex64 tensor of the same shape as `input`. The inner-most\ndimension of `input` is replaced with its inverse 1D Fourier Transform."
    +  name: "output"
    +  type: DT_COMPLEX64
    +}
    +-}
    +
    +-- | Compute the inverse 3-dimensional discrete Fourier Transform over the inner-most
    +--
    +-- 3 dimensions of `input`.
    +iFFT3D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
    +          -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3
    +          -- dimensions of `input` are replaced with their inverse 3D Fourier Transform.
    +iFFT3D input | eqLengthGuard [] =
    +    buildOp (opDef "IFFT3D")
    +        input
    +{-
    +input_arg {
    +  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
    +}
    +output_arg {
    +  description: "A complex64 tensor of the same shape as `input`. The inner-most 3\ndimensions of `input` are replaced with their inverse 3D Fourier Transform."
    +  name: "output"
    +  type: DT_COMPLEX64
    +}
    +-}
    +
    +-- | Compute the pairwise cross product.
    +--
    +-- `a` and `b` must be the same shape; they can either be simple 3-element vectors,
    +-- or any shape where the innermost dimension is 3. In the latter case, each pair
    +-- of corresponding 3-element vectors is cross-multiplied independently.
    +cross :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
    +                                                 Data.Int.Int64, Data.Int.Int8,
    +                                                 Data.Word.Word16,
    +                                                 Data.Word.Word8, Double,
    +                                                 Float] t) =>
    +         Tensor v1 t -- ^ __a__: A tensor containing 3-element vectors.
    +         -> Tensor v2 t -- ^ __b__: Another tensor, of same type and shape as `a`.
    +         -> Tensor Value t -- ^ __product__: Pairwise cross product of the vectors in `a` and `b`.
    +cross a b | eqLengthGuard [] =
    +    buildOp (opDef "Cross"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        a b
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "A tensor containing 3-element vectors."
    +  name: "a"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "Another tensor, of same type and shape as `a`."
    +  name: "b"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "Pairwise cross product of the vectors in `a` and `b`."
    +  name: "product"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Compute the cumulative sum of the tensor `x` along `axis`.
    +--
    +-- By default, this op performs an inclusive cumsum, which means that the first
    +-- element of the input is identical to the first element of the output:
    +-- ```prettyprint
    +-- tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
    +-- ```
    +-- 
    +-- By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
    +-- performed instead:
    +-- ```prettyprint
    +-- tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
    +-- ```
    +-- 
    +-- By setting the `reverse` kwarg to `True`, the cumsum is performed in the
    +-- opposite direction:
    +-- ```prettyprint
    +-- tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
    +-- ```
    +-- This is more efficient than using separate `tf.reverse` ops.
    +-- 
    +-- The `reverse` and `exclusive` kwargs can also be combined:
    +-- ```prettyprint
    +-- tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
    +-- ```
    +cumsum :: forall v1 v2 t tidx . (TensorType t,
    +                                 OneOf '[(Data.Complex.Complex Double),
    +                                         (Data.Complex.Complex Float),
    +                                         Data.Int.Int16, Data.Int.Int32,
    +                                         Data.Int.Int64, Data.Int.Int8,
    +                                         Data.Word.Word16, Data.Word.Word8,
    +                                         Double, Float] t, TensorType tidx,
    +                                 OneOf '[Data.Int.Int32,
    +                                         Data.Int.Int64] tidx) =>
    +          Tensor v1 t -- ^ __x__
    +          -> Tensor v2 tidx -- ^ __axis__
    +          -> Tensor Value t -- ^ __out__
    +cumsum x axis | eqLengthGuard [] =
    +    buildOp (opDef "Cumsum"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
    +        x axis
    +{-
    +attr { default_value { b: false } name: "exclusive" type: "bool" }
    +attr { default_value { b: false } name: "reverse" type: "bool" }
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  default_value { type: DT_INT32 }
    +  name: "Tidx"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +input_arg { name: "axis" type_attr: "Tidx" }
    +output_arg { name: "out" type_attr: "T" }
    +-}
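+
+-- Note: this generated wrapper sets only the "T" and "Tidx" attrs, so the
+-- `exclusive` and `reverse` attrs shown above keep their defaults (false)
+-- and the op always computes an inclusive, forward cumsum.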
    +
    +-- | 
    +
    +batchIFFT :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
    +             -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
    +batchIFFT input | eqLengthGuard [] =
    +    buildOp (opDef "BatchIFFT")
    +        input
    +{-
    +input_arg { name: "input" type: DT_COMPLEX64 }
    +output_arg { name: "output" type: DT_COMPLEX64 }
    +-}
    +
    +-- | Computes the Gauss error function of `x` element-wise.
    +
    +erf :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                            Float] t) => Tensor v1 t -- ^ __x__
    +       -> Tensor Value t -- ^ __y__
    +erf x | eqLengthGuard [] =
    +    buildOp (opDef "Erf"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | For each key, assigns the respective value to the specified component.
    +--
    +-- If a key is not found in the barrier, this operation will create a new
    +-- incomplete element. If a key is found in the barrier, and the element
    +-- already has a value at component_index, this operation will fail with
    +-- INVALID_ARGUMENT, and leave the barrier in an undefined state.
    +barrierInsertMany :: forall v1 v2 v3 t . (TensorType t) =>
    +                     Data.Int.Int64 -- ^ __component_index__: The component of the barrier elements that is being assigned.
    +                     -> Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
    +                     -> Tensor v2 Data.ByteString.ByteString -- ^ __keys__: A one-dimensional tensor of keys, with length n.
    +                     -> Tensor v3 t -- ^ __values__: An any-dimensional tensor of values, which are associated with the
    +                                    -- respective keys. The 0th dimension must have length n.
    +                     -> ControlNode
    +barrierInsertMany component_index handle keys values | eqLengthGuard [] =
    +    buildOp (opDef "BarrierInsertMany"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "component_index" .~ component_index)
    +        handle keys values
    +{-
    +attr { name: "T" type: "type" }
    +attr {
    +  description: "The component of the barrier elements that is being assigned."
    +  name: "component_index"
    +  type: "int"
    +}
    +input_arg {
    +  description: "The handle to a barrier."
    +  is_ref: true
    +  name: "handle"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "A one-dimensional tensor of keys, with length n."
    +  name: "keys"
    +  type: DT_STRING
    +}
    +input_arg {
    +  description: "An any-dimensional tensor of values, which are associated with the\nrespective keys. The 0th dimension must have length n."
    +  name: "values"
    +  type_attr: "T"
    +}
    +-}
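+
+-- Note: `component_index` is a graph attr rather than a tensor input, which
+-- is why the wrapper takes it as a plain Data.Int.Int64 and threads it
+-- through `opAttr "component_index"` instead of passing it with the tensors.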
    +
    +-- | Returns element-wise largest integer not greater than x.
    +
    +floor :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
    +                                              Float] t) =>
    +         Tensor v1 t -- ^ __x__
    +         -> Tensor Value t -- ^ __y__
    +floor x | eqLengthGuard [] =
    +    buildOp (opDef "Floor"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        x
    +{-
    +attr {
    +  allowed_values {
    +    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "x" type_attr: "T" }
    +output_arg { name: "y" type_attr: "T" }
    +-}
    +
    +-- | 
    +
    +batchFFT2D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
    +              -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
    +batchFFT2D input | eqLengthGuard [] =
    +    buildOp (opDef "BatchFFT2D")
    +        input
    +{-
    +input_arg { name: "input" type: DT_COMPLEX64 }
    +output_arg { name: "output" type: DT_COMPLEX64 }
    +-}
    +
    +-- | The gradient operator for the SparseAdd op.
    +--
    +-- The SparseAdd op calculates A + B, where A, B, and the sum are all represented
    +-- as `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.
    +-- non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
    +-- values of A and B.
    +sparseAddGrad :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                         OneOf '[(Data.Complex.Complex Double),
    +                                                 (Data.Complex.Complex Float),
    +                                                 Data.Int.Int16, Data.Int.Int32,
    +                                                 Data.Int.Int64, Data.Int.Int8,
    +                                                 Data.Word.Word16,
    +                                                 Data.Word.Word8, Double,
    +                                                 Float] t) =>
    +                 Tensor v1 t -- ^ __backprop_val_grad__: 1-D with shape `[nnz(sum)]`.  The gradient with respect to
    +                             -- the non-empty values of the sum.
    +                 -> Tensor v2 Data.Int.Int64 -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
    +                 -> Tensor v3 Data.Int.Int64 -- ^ __b_indices__: 2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
    +                 -> Tensor v4 Data.Int.Int64 -- ^ __sum_indices__: 2-D.  The `indices` of the sum `SparseTensor`, size
    +                                             -- `[nnz(sum), ndims]`.
    +                 -> (Tensor Value t, Tensor Value t)
    +                 -- ^ (__a_val_grad__, __b_val_grad__)
    +                 --
    +                 -- * __a_val_grad__: 1-D with shape `[nnz(A)]`. The gradient with respect to the
    +                 -- non-empty values of A.
    +                 --
    +                 -- * __b_val_grad__: 1-D with shape `[nnz(B)]`. The gradient with respect to the
    +                 -- non-empty values of B.
    +sparseAddGrad backprop_val_grad a_indices b_indices
    +              sum_indices | eqLengthGuard [] =
    +    buildOp (opDef "SparseAddGrad"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        backprop_val_grad a_indices b_indices sum_indices
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "1-D with shape `[nnz(sum)]`.  The gradient with respect to\nthe non-empty values of the sum."
    +  name: "backprop_val_grad"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`."
    +  name: "a_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`."
    +  name: "b_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "2-D.  The `indices` of the sum `SparseTensor`, size\n`[nnz(sum), ndims]`."
    +  name: "sum_indices"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "1-D with shape `[nnz(A)]`. The gradient with respect to the\nnon-empty values of A."
    +  name: "a_val_grad"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1-D with shape `[nnz(B)]`. The gradient with respect to the\nnon-empty values of B."
    +  name: "b_val_grad"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Adds two `SparseTensor` objects to produce another `SparseTensor`.
    +--
    +-- The input `SparseTensor` objects' indices are assumed ordered in standard
    +-- lexicographic order.  If this is not the case, before this step run
    +-- `SparseReorder` to restore index ordering.
    +-- 
    +-- By default, if two values sum to zero at some index, the output `SparseTensor`
    +-- would still include that particular location in its index, storing a zero in the
    +-- corresponding value slot.  To override this, callers can specify `thresh`,
    +-- indicating that if the sum has a magnitude strictly smaller than `thresh`, its
    +-- corresponding value and index would then not be included.  In particular,
+-- `thresh == 0` (the default) means everything is kept; actual thresholding
+-- happens only for positive values of `thresh`.
    +-- 
    +-- In the following shapes, `nnz` is the count after taking `thresh` into account.
    +sparseAdd :: forall v1 v2 v3 v4 v5 v6 v7 t treal . (TensorType t,
    +                                                    OneOf '[(Data.Complex.Complex Double),
    +                                                            (Data.Complex.Complex Float),
    +                                                            Data.Int.Int16,
    +                                                            Data.Int.Int32,
    +                                                            Data.Int.Int64,
    +                                                            Data.Int.Int8,
    +                                                            Data.Word.Word16,
    +                                                            Data.Word.Word8,
    +                                                            Double, Float] t,
    +                                                    TensorType treal,
    +                                                    OneOf '[Data.Int.Int16,
    +                                                            Data.Int.Int32,
    +                                                            Data.Int.Int64,
    +                                                            Data.Int.Int8,
    +                                                            Data.Word.Word16,
    +                                                            Data.Word.Word8,
    +                                                            Double,
    +                                                            Float] treal) =>
    +             Tensor v1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
    +             -> Tensor v2 t -- ^ __a_values__: 1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.
    +             -> Tensor v3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
    +             -> Tensor v4 Data.Int.Int64 -- ^ __b_indices__: 2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
    +             -> Tensor v5 t -- ^ __b_values__: 1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.
    +             -> Tensor v6 Data.Int.Int64 -- ^ __b_shape__: 1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
    +             -> Tensor v7 treal -- ^ __thresh__: 0-D.  The magnitude threshold that determines if an output value/index
    +                                -- pair takes space.
    +             -> (Tensor Value Data.Int.Int64, Tensor Value t,
    +                 Tensor Value Data.Int.Int64)
    +             -- ^ (__sum_indices__, __sum_values__, __sum_shape__)
    +             --
    +             -- * __sum_indices__
    +             --
    +             -- * __sum_values__
    +             --
    +             -- * __sum_shape__
    +sparseAdd a_indices a_values a_shape b_indices b_values b_shape
    +          thresh | eqLengthGuard [] =
    +    buildOp (opDef "SparseAdd"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Treal" .~ tensorType (undefined :: treal))
    +        a_indices a_values a_shape b_indices b_values b_shape thresh
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT32
    +      type: DT_INT64
    +      type: DT_UINT8
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_UINT16
    +      type: DT_HALF
    +    }
    +  }
    +  name: "Treal"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix."
    +  name: "a_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector."
    +  name: "a_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector."
    +  name: "a_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix."
    +  name: "b_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector."
    +  name: "b_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector."
    +  name: "b_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "0-D.  The magnitude threshold that determines if an output value/index\npair takes space."
    +  name: "thresh"
    +  type_attr: "Treal"
    +}
    +output_arg { name: "sum_indices" type: DT_INT64 }
    +output_arg { name: "sum_values" type_attr: "T" }
    +output_arg { name: "sum_shape" type: DT_INT64 }
    +-}
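+
+-- Note: `thresh` is typed by the separate "Treal" attr, whose allowed types
+-- exclude the complex ones, so a real-valued magnitude threshold can be
+-- supplied even when the value type `t` is complex.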
    +
    +-- | 
    +
    +batchCholesky :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
    +                 Tensor v1 t -- ^ __input__
    +                 -> Tensor Value t -- ^ __output__
    +batchCholesky input | eqLengthGuard [] =
    +    buildOp (opDef "BatchCholesky"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        input
    +{-
    +attr {
    +  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg { name: "input" type_attr: "T" }
    +output_arg { name: "output" type_attr: "T" }
    +-}
    +
    +-- | Partitions `data` into `num_partitions` tensors using indices from `partitions`.
    +--
    +-- For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
    +-- becomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`
    +-- are placed in `outputs[i]` in lexicographic order of `js`, and the first
    +-- dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
    +-- In detail,
    +-- 
    +--     outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
    +-- 
    +--     outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
    +-- 
    +-- `data.shape` must start with `partitions.shape`.
    +-- 
    +-- For example:
    +-- 
    +--     # Scalar partitions
    +--     partitions = 1
    +--     num_partitions = 2
    +--     data = [10, 20]
    +--     outputs[0] = []  # Empty with shape [0, 2]
    +--     outputs[1] = [[10, 20]]
    +-- 
    +--     # Vector partitions
    +--     partitions = [0, 0, 1, 1, 0]
    +--     num_partitions = 2
    +--     data = [10, 20, 30, 40, 50]
    +--     outputs[0] = [10, 20, 50]
    +--     outputs[1] = [30, 40]
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/DynamicPartition.png" alt>
    +-- </div>
    +dynamicPartition :: forall v1 v2 t . (TensorType t) =>
    +                    Data.Int.Int64 -- ^ __num_partitions__: The number of partitions to output.
    +                    -> Tensor v1 t -- ^ __data__
    +                    -> Tensor v2 Data.Int.Int32 -- ^ __partitions__: Any shape.  Indices in the range `[0, num_partitions)`.
    +                    -> [Tensor Value t] -- ^ __outputs__
    +dynamicPartition num_partitions data' partitions | eqLengthGuard [] =
    +    buildListOp [num_partitions] (opDef "DynamicPartition"
    +                                  & opAttr "T" .~ tensorType (undefined :: t)
    +                                  & opAttr "num_partitions" .~ num_partitions)
    +        data' partitions
    +{-
    +attr {
    +  description: "The number of partitions to output."
    +  has_minimum: true
    +  minimum: 1
    +  name: "num_partitions"
    +  type: "int"
    +}
    +attr { name: "T" type: "type" }
    +input_arg { name: "data" type_attr: "T" }
    +input_arg {
    +  description: "Any shape.  Indices in the range `[0, num_partitions)`."
    +  name: "partitions"
    +  type: DT_INT32
    +}
    +output_arg {
    +  name: "outputs" number_attr: "num_partitions" type_attr: "T"
    +}
    +-}
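+
+-- Note: `num_partitions` does double duty above: it is written into the
+-- "num_partitions" attr and also passed to `buildListOp`, which is what
+-- tells the builder how many output tensors the returned list contains.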
    +
    +-- | Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.
    +
    +serializeSparse :: forall v1 v2 v3 t . (TensorType t) =>
    +                   Tensor v1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the `SparseTensor`.
    +                   -> Tensor v2 t -- ^ __sparse_values__: 1-D.  The `values` of the `SparseTensor`.
    +                   -> Tensor v3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the `SparseTensor`.
    +                   -> Tensor Value Data.ByteString.ByteString -- ^ __serialized_sparse__
    +serializeSparse sparse_indices sparse_values sparse_shape | eqLengthGuard [] =
    +    buildOp (opDef "SerializeSparse"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        sparse_indices sparse_values sparse_shape
    +{-
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "2-D.  The `indices` of the `SparseTensor`."
    +  name: "sparse_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  The `values` of the `SparseTensor`."
    +  name: "sparse_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  The `shape` of the `SparseTensor`."
    +  name: "sparse_shape"
    +  type: DT_INT64
    +}
    +output_arg { name: "serialized_sparse" type: DT_STRING }
    +-}
    +
    +-- | Concatenates a list of `SparseTensor` along the specified dimension.
    +--
    +-- Concatenation is with respect to the dense versions of these sparse tensors.
    +-- It is assumed that each input is a `SparseTensor` whose elements are ordered
    +-- along increasing dimension number.
    +-- 
    +-- All inputs' shapes must match, except for the concat dimension.  The
    +-- `indices`, `values`, and `shapes` lists must have the same length.
    +-- 
    +-- The output shape is identical to the inputs', except along the concat
    +-- dimension, where it is the sum of the inputs' sizes along that dimension.
    +-- 
    +-- The output elements will be resorted to preserve the sort order along
    +-- increasing dimension number.
    +-- 
    +-- This op runs in `O(M log M)` time, where `M` is the total number of non-empty
    +-- values across all inputs. This is due to the need for an internal sort in
    +-- order to concatenate efficiently across an arbitrary dimension.
    +-- 
    +-- For example, if `concat_dim = 1` and the inputs are
    +-- 
    +--     sp_inputs[0]: shape = [2, 3]
    +--     [0, 2]: "a"
    +--     [1, 0]: "b"
    +--     [1, 1]: "c"
    +-- 
    +--     sp_inputs[1]: shape = [2, 4]
    +--     [0, 1]: "d"
    +--     [0, 2]: "e"
    +-- 
    +-- then the output will be
    +-- 
    +--     shape = [2, 7]
    +--     [0, 2]: "a"
    +--     [0, 4]: "d"
    +--     [0, 5]: "e"
    +--     [1, 0]: "b"
    +--     [1, 1]: "c"
    +-- 
    +-- Graphically this is equivalent to doing
    +-- 
    +--     [    a] concat [  d e  ] = [    a   d e  ]
    +--     [b c  ]        [       ]   [b c          ]
    +sparseConcat :: forall v1 v2 v3 t . (TensorType t) =>
    +                Data.Int.Int64 -- ^ __concat_dim__: Dimension to concatenate along. Must be in range [-rank, rank),
    +                               -- where rank is the number of dimensions in each input `SparseTensor`.
    +                -> [Tensor v1 Data.Int.Int64] -- ^ __indices__: 2-D.  Indices of each input `SparseTensor`.
    +                -> [Tensor v2 t] -- ^ __values__: 1-D.  Non-empty values of each `SparseTensor`.
    +                -> [Tensor v3 Data.Int.Int64] -- ^ __shapes__: 1-D.  Shapes of each `SparseTensor`.
    +                -> (Tensor Value Data.Int.Int64, Tensor Value t,
    +                    Tensor Value Data.Int.Int64)
    +                -- ^ (__output_indices__, __output_values__, __output_shape__)
    +                --
    +                -- * __output_indices__: 2-D.  Indices of the concatenated `SparseTensor`.
    +                --
    +                -- * __output_values__: 1-D.  Non-empty values of the concatenated `SparseTensor`.
    +                --
    +                -- * __output_shape__: 1-D.  Shape of the concatenated `SparseTensor`.
    +sparseConcat concat_dim indices values
    +             shapes | eqLengthGuard [("N", [("shapes", length shapes),
    +                                            ("values", length values),
    +                                            ("indices", length indices)])] =
    +    buildOp (opDef "SparseConcat"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "concat_dim" .~ concat_dim
    +             & opAttr "N" .~ (fromIntegral (length shapes) :: Int64))
    +        indices values shapes
    +{-
    +attr {
    +  description: "Dimension to concatenate along. Must be in range [-rank, rank),\nwhere rank is the number of dimensions in each input `SparseTensor`."
    +  name: "concat_dim"
    +  type: "int"
    +}
    +attr { has_minimum: true minimum: 2 name: "N" type: "int" }
    +attr { name: "T" type: "type" }
    +input_arg {
    +  description: "2-D.  Indices of each input `SparseTensor`."
    +  name: "indices"
    +  number_attr: "N"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  Non-empty values of each `SparseTensor`."
    +  name: "values"
    +  number_attr: "N"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  Shapes of each `SparseTensor`."
    +  name: "shapes"
    +  number_attr: "N"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "2-D.  Indices of the concatenated `SparseTensor`."
    +  name: "output_indices"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "1-D.  Non-empty values of the concatenated `SparseTensor`."
    +  name: "output_values"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1-D.  Shape of the concatenated `SparseTensor`."
    +  name: "output_shape"
    +  type: DT_INT64
    +}
    +-}
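+
+-- Note: unlike most wrappers in this module, sparseConcat passes a non-empty
+-- spec to eqLengthGuard: the `indices`, `values`, and `shapes` lists must all
+-- have the same length, which is also recorded in the "N" attr (the proto
+-- requires N >= 2).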
    +
    +-- | Computes the product along segments of a tensor.
    +--
    +-- Read [the section on
    +-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
    +-- of segments.
    +-- 
    +-- Computes a tensor such that
    +-- \\(output_i = \prod_j data_j\\) where the product is over `j` such
    +-- that `segment_ids[j] == i`.
    +-- 
    +-- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
    +-- <img style="width:100%" src="../../images/SegmentProd.png" alt>
    +-- </div>
    +segmentProd :: forall v1 v2 t tindices . (TensorType t,
    +                                          OneOf '[(Data.Complex.Complex Double),
    +                                                  (Data.Complex.Complex Float),
    +                                                  Data.Int.Int16,
    +                                                  Data.Int.Int32,
    +                                                  Data.Int.Int64, Data.Int.Int8,
    +                                                  Data.Word.Word16,
    +                                                  Data.Word.Word8, Double,
    +                                                  Float] t, TensorType tindices,
    +                                          OneOf '[Data.Int.Int32,
    +                                                  Data.Int.Int64] tindices) =>
    +               Tensor v1 t -- ^ __data__
    +               -> Tensor v2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
    +                                     -- first dimension.  Values should be sorted and can be repeated.
    +               -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
    +               -- has size `k`, the number of segments.
    +segmentProd data' segment_ids | eqLengthGuard [] =
    +    buildOp (opDef "SegmentProd"
    +             & opAttr "T" .~ tensorType (undefined :: t)
    +             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
    +        data' segment_ids
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +attr {
    +  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
    +  name: "Tindices"
    +  type: "type"
    +}
    +input_arg { name: "data" type_attr: "T" }
    +input_arg {
    +  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
    +  name: "segment_ids"
    +  type_attr: "Tindices"
    +}
    +output_arg {
    +  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Reshapes a SparseTensor to represent values in a new dense shape.
    +--
    +-- This operation has the same semantics as reshape on the represented dense
    +-- tensor.  The `input_indices` are recomputed based on the requested `new_shape`.
    +-- 
    +-- If one component of `new_shape` is the special value -1, the size of that
    +-- dimension is computed so that the total dense size remains constant.  At
    +-- most one component of `new_shape` can be -1.  The number of dense elements
    +-- implied by `new_shape` must be the same as the number of dense elements
    +-- originally implied by `input_shape`.
    +-- 
    +-- Reshaping does not affect the order of values in the SparseTensor.
    +-- 
    +-- If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
    +-- has length `R_out`, then `input_indices` has shape `[N, R_in]`,
    +-- `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
    +-- `output_shape` has length `R_out`.
    +sparseReshape :: Tensor v1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R_in` matrix with the indices of non-empty values in a
    +                                          -- SparseTensor.
    +                 -> Tensor v2 Data.Int.Int64 -- ^ __input_shape__: 1-D.  `R_in` vector with the input SparseTensor's dense shape.
    +                 -> Tensor v3 Data.Int.Int64 -- ^ __new_shape__: 1-D.  `R_out` vector with the requested new dense shape.
    +                 -> (Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int64)
    +                 -- ^ (__output_indices__, __output_shape__)
    +                 --
    +                 -- * __output_indices__: 2-D.  `N x R_out` matrix with the updated indices of non-empty
    +                 -- values in the output SparseTensor.
    +                 --
    +                 -- * __output_shape__: 1-D.  `R_out` vector with the full dense shape of the output
    +                 -- SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
    +                 -- filled in.
    +sparseReshape input_indices input_shape new_shape | eqLengthGuard [] =
    +    buildOp (opDef "SparseReshape")
    +        input_indices input_shape new_shape
    +{-
    +input_arg {
    +  description: "2-D.  `N x R_in` matrix with the indices of non-empty values in a\nSparseTensor."
    +  name: "input_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `R_in` vector with the input SparseTensor\'s dense shape."
    +  name: "input_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `R_out` vector with the requested new dense shape."
    +  name: "new_shape"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "2-D.  `N x R_out` matrix with the updated indices of non-empty\nvalues in the output SparseTensor."
    +  name: "output_indices"
    +  type: DT_INT64
    +}
    +output_arg {
    +  description: "1-D.  `R_out` vector with the full dense shape of the output\nSparseTensor.  This is the same as `new_shape` but with any -1 dimensions\nfilled in."
    +  name: "output_shape"
    +  type: DT_INT64
    +}
    +-}
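+
+-- For example, reshaping an input with dense shape `[2, 3, 4]` using
+-- `new_shape = [6, -1]` infers the -1 dimension as (2 * 3 * 4) / 6 = 4,
+-- so `output_shape` comes back as `[6, 4]`.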
    +
    +-- | Component-wise multiplies a SparseTensor by a dense Tensor.
    +--
    +-- The output locations corresponding to the implicitly zero elements in the sparse
    +-- tensor will be zero (i.e., will not take up storage space), regardless of the
+-- contents of the dense tensor (even if it is +/-INF, and despite the fact
+-- that INF*0 == NaN).
    +-- 
    +-- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
    +-- the other direction.
    +sparseDenseCwiseMul :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                               OneOf '[(Data.Complex.Complex Double),
    +                                                       (Data.Complex.Complex Float),
    +                                                       Data.Int.Int16,
    +                                                       Data.Int.Int32,
    +                                                       Data.Int.Int64,
    +                                                       Data.Int.Int8,
    +                                                       Data.Word.Word16,
    +                                                       Data.Word.Word8, Double,
    +                                                       Float] t) =>
    +                       Tensor v1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
    +                                                -- SparseTensor, possibly not in canonical ordering.
    +                       -> Tensor v2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
    +                       -> Tensor v3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
    +                       -> Tensor v4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
    +                       -> Tensor Value t -- ^ __output__: 1-D.  The `N` values that are operated on.
    +sparseDenseCwiseMul sp_indices sp_values sp_shape dense | eqLengthGuard [] =
    +    buildOp (opDef "SparseDenseCwiseMul"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        sp_indices sp_values sp_shape dense
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
    +  name: "sp_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
    +  name: "sp_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the input SparseTensor."
    +  name: "sp_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "`R`-D.  The dense Tensor operand."
    +  name: "dense"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1-D.  The `N` values that are operated on."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    +-- | Component-wise divides a SparseTensor by a dense Tensor.
    +--
    +-- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
    +-- the other direction.
    +sparseDenseCwiseDiv :: forall v1 v2 v3 v4 t . (TensorType t,
    +                                               OneOf '[(Data.Complex.Complex Double),
    +                                                       (Data.Complex.Complex Float),
    +                                                       Data.Int.Int16,
    +                                                       Data.Int.Int32,
    +                                                       Data.Int.Int64,
    +                                                       Data.Int.Int8,
    +                                                       Data.Word.Word16,
    +                                                       Data.Word.Word8, Double,
    +                                                       Float] t) =>
    +                       Tensor v1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
    +                                                -- SparseTensor, possibly not in canonical ordering.
    +                       -> Tensor v2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
    +                       -> Tensor v3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
    +                       -> Tensor v4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
    +                       -> Tensor Value t -- ^ __output__: 1-D.  The `N` values that are operated on.
    +sparseDenseCwiseDiv sp_indices sp_values sp_shape dense | eqLengthGuard [] =
    +    buildOp (opDef "SparseDenseCwiseDiv"
    +             & opAttr "T" .~ tensorType (undefined :: t))
    +        sp_indices sp_values sp_shape dense
    +{-
    +attr {
    +  allowed_values {
    +    list {
    +      type: DT_FLOAT
    +      type: DT_DOUBLE
    +      type: DT_INT64
    +      type: DT_INT32
    +      type: DT_UINT8
    +      type: DT_UINT16
    +      type: DT_INT16
    +      type: DT_INT8
    +      type: DT_COMPLEX64
    +      type: DT_COMPLEX128
    +      type: DT_QINT8
    +      type: DT_QUINT8
    +      type: DT_QINT32
    +      type: DT_HALF
    +    }
    +  }
    +  name: "T"
    +  type: "type"
    +}
    +input_arg {
    +  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
    +  name: "sp_indices"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
    +  name: "sp_values"
    +  type_attr: "T"
    +}
    +input_arg {
    +  description: "1-D.  Shape of the input SparseTensor."
    +  name: "sp_shape"
    +  type: DT_INT64
    +}
    +input_arg {
    +  description: "`R`-D.  The dense Tensor operand."
    +  name: "dense"
    +  type_attr: "T"
    +}
    +output_arg {
    +  description: "1-D.  The `N` values that are operated on."
    +  name: "output"
    +  type_attr: "T"
    +}
    +-}
    +
    + diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/hscolour.css new file mode 100644 index 0000000..c15919e --- /dev/null +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/hscolour.css @@ -0,0 +1,5 @@ +.hs-keyglyph, .hs-layout {color: red;} +.hs-keyword {color: blue;} +.hs-comment, .hs-comment a {color: green;} +.hs-str, .hs-chr {color: teal;} +.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {} diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-core-ops-0.1.0.0/synopsis.png new file mode 100644 index 0000000000000000000000000000000000000000..85fb86ec84907bcc86531dc82871948ff4d471fa GIT binary patch literal 11327 zcmV-FEWp!=P)4Tx0C)k_S!GyNTeqHT_l8Y(cXyX`gGi?cY`Qxn1VID|MJXwjPC)?)F$h6K zMMOd+6hs7sqbPzXbr*U(-*=zy-hcPcUC*=TdiNM(jyd-lv&OpsU|J&v2m2!^0SE{T z54F(O;E2!K(!rTCW z%wV;vdzf1QjBf#e&~gh74F>?Z4a=WLg$KhJ^$5nap>PLbJadS>e&h8+?D`9%QNL`g zEVKbYGXj7k5Q(8)0Fd#*a?VIMFW3*64geVHKzE-&0BG!BtmfuTbO(T`0Jaeg2nagF z{V*1E{Wm{e|AvV~*MEExiC+KU-~R=!2{)|c6Bg`GjQ;iG|FQ`1kAUCTuZtQk34#8{ z4r4(3g7#|{=Z@d+d#}7f!3C=>=26vx*jwA8>@MS>RG@Tt_zt3hie^T z_?0%9VUd=)Fos7I z^ghPh%Jy%YZ|)vCf6EaFPai$Q-!=$ppK!y&wrJs)bNdAuANB!m3n34Tfj{s75g-&U z1A!Pg3bcXF-=!Gv1VmU93G2duANT;{0JugFTqg*|oPXPC|A$2HS3NJd-hcPV3EW`Y zh=1Dr-5Mv{<{zIvz#Ybay&^Vcn^E_`qRfl{{bzYkp)4~$~NAx_VB;E z{?P)PU)DbV{Qi#~0H0@T9czDj06@6MNq8OrpdAz(9qQxd9nPr<&s+~tPQySqaZyfb zNh!%g_5YjeaLxMN*$sv_p;d%b#U$Wpz0Geb0U>E+EOsEQ;I!&= zNC6q(BFFWohy&t- zL?CHM5mJM6p`(xmWDmJOUQi$u0mVUQpbRJ*DuT+OI;a`C4fR4p&?xj8nuk`Puh35f z55*JWF{C0=8)=GkKzbrWk@3iMWInPS*@Wyu4kE{pbI3L14-^JPgW^Pq!Q<2bWsPz} zg`nb5nW!REEvg;Wj~YYGqt;RTXfiY_S_G|(HbmQ@z0gtU6m&ki8r_B-Ku@3-(OVb{ zh8`n;QNS2r>@mKWSWG773g!l;2Q!LUz-(f%SSG9pRuyZCC1S&|DcC~nb!<2G1$Gg; zjU&Zz;G}VSI0sxHE(w>9tH<5Py}&KucJP#VKD;vC6z`6Y#%JLx@m=^4{33pbgo;Ff zM3uyf#Fr$Iq=2M}WPoIbWP_BHl$%tE)ST3Z^fYM!=}po{r1PXd2-E~&f;PdC5J9*= zs3G(aUK2LR$jJD~G{_vt!pSa>)sa0QdqcKOPD3tEZbLrbsZB|wjHfK7yiNI%a+8XNN{Y&qDu61Js-9|yYMB~K%}=dM z?M|IcT|xbTdVvN>!$YG@<3@9arjllWW|0;{D?n>V>r0zK+erJ2cAbuzPL|Gw?j&6? 
z-95TFdL%tRy&=6neHMKS{UrTQ1~vvw1`mcbh9-s=4Br`97&RC@7}FVVFitT3Wa4Df zW%6UX#MHqw%Zy?cW;SPzV!p~ez`Vvn%c8>K#*)s`!ZO8*U=?PyV2x$1V13HE$;Qs6 z&lb#9$o7D3jh&udgWZ=sm;FBb3I`2`8ix-@E=M=VM@~9UO-_H#0?vNUbuLye1Fi_J zGOlM_JKO@?*4#+T3Fgmx>$N#hD=6JCPAiC=8LR|tcUDX*;jHjawc-Aa(!}p@(S{y z@=fw93cLy~3MC3J6=@aC6f+ecDWR3LloFKgD*aHFR}NQhQU0tVrsAhkud;kZ;E2bO z$|DP^+^R&?GSxXXPBj;`QnfjCE_I@Mx%xW|9u0SmYKzbdmB(*}d+O)oF zD{G(9?$JT&=D|u+DJZ zNWtioQNJ<4*wVPj_}x+AqoGH;Ob{kUCOIZE$M}u~9_ug#riP|Drn6=OW+7&G%rWL> z=Ede8ETk;rECwxUES)XuEw`++tg@`8tp%+ktov*zY#eRsY`)v-*k;?#*-6-)vU_6B zZ0}>=>40^xaj16KJg$2@@A#sloMVdPRon; zro?jMrmLZAiR-$Xw%cX5Rd)^dT=x|ZRgY|sB~Mk)Y|mvcRj(Yc6>oL#eD5_MZJ#2a zFTMu8*L=VGnflfE9r)Y&-w413xCGn|qz?28>kOxb4~I`91S8Hy%txw47DsMJ*+jLTq&gXR@@ceibXxRMj9yGtEGpJ5wl9t= zE-`NYl;)|jcqraAzAu3%Avt03wEpSZM3O|m#Ni~#r0k?`XKc@OC9@@;PF^^xf3_io zJS8;cWvWW*wR5O*KIfjL$)pvg?Wen^KhBWM$j{i#bjy5vUg~_o`GX6d7oKIwXI;IB zxfpnH@{;j<`HmaI~Pakhkz+;ck(4 z(L}LU@r@GJlC+ZVSKP0>xT6f*a^OxsWU@9UjK2+LN4pu2v z)m1ZBXH@Ui1lG*eTGaN}Db&@~v({%dAQ~bXR<1ijt)TYR@l+GyI++oAU8_Vo_$j=4_z&e7XOxBI$Oy4voD->JFFb+`B) z-My^)B=?i=A9TlbZ}tTDto3^JF7!F~O+T=EFy3$8|7^f`;L$_9hYtod2fH7sKDs-k zJaqf9;^U4d@=w~I$~|oxmK$z+CjYE`L}8@!xzh8l(IcbxU#P$69n%?mIBq!pWa8Mw z=%n@JtCx;1=U%zLT7K>S`pZ=0)Xwzj8T3s0Eahze8`d}FZ-w68n3JEoH?K4Q^qu9q z=>@li)%RiVcNddCkbTHs;#jI%mR`QQqPOz=CgGy+9whdp4g`BLCvp!8U&;uov(!a2t+bEnRv6HXyi9t`-YglcEo`$K zI8GTZXYLH1F5YE+b^&9-c%dfYc~N>X1MygiCdpZ8N*OKLV7W5+5rusvVP$KTgd_E; zV`@J%*flk^Jhjj1)aX9cTQC5ItVZ(2W=FkE;*aH-)|+*kk6SET?pjmWaNEk+>D${o z_#cmV%sNr-bj$gX%QW$m8{|&wA?SI;%go!uC))SCU%7vKz~jI-L0?1Ap^RZ7;i?hG zB3+__P9{WW#uUa@#oavB8Q+`m==5;nXwvwZiR6j1<0+%5!{;8Q^`_s>XwIxTUvlAM z)|rdpmprp=bM$iM@_6#8@((Vr7Q8HcP;{fXs3iGH;8nY8TBRaov}JqcixtC_ZBw07?YBCLI#1vB=rX<|d6)j~ z?!9;SA9XkN4rDD83J6N{$`!z{xG&lW}=KCd6md=WHe zF)la3F!5t@`sLkMS6?Sg5vR3gcxTbGOK%>(y*_twKH{Cjg64anMViI^4{J-a%g0=3|@n*5+(H4=G;Z`Bm z0XDw2UUnY#t`5ZG&WObDFO_)C zCe0{aEki1k_dNXt+=U-mA1_W_8p^(%Qj|@Mb z9sM+h7-yIepVWIvd=>Y)XzKR#)XeT1jH zI8-@&65hs?W6g0$Tn9b?K9MevmJ{6JljSOT6GbGYHWfM5G<6M41g#z&E8Qx6H$yI? 
z50eHn6Z1ODBi1suSavH8F-{EUJXaTYHjh8AJ|73)7XPq7gt>OirQ5IDz)!g7S$y<#pnvPn` zTCcP(>sag3>W=B<=vx}l7>pa{8`&AN7|$LpGx0noeC)GnyV)so9SefRgyl6WA8Q%w zeVfO&`F8I1(hk7k+3~B6fhW|RD4pIpx4EPekGo2^q1>k2n?25Xx_BviQ+coYJoGK~ zi}SY&kPV~?{2VkK+z^r;>Jw%VE)ao-y@)AN%A4?QY z!X(X~xtpASHaNvFl_z!g+(cSqdP;^mD`$^mG5`i zpn$&+Rk%>pUtCp^dd2Um*){o6wlZ|t=klqF!OHfk>gs};%-W>7nEHr@(CeX%5lwM7 zQg7xp*S7SwzHLLbOLn+*Uc0?`NAB*$d)wWCJsW)~{h|X4gV%@BpPU*_8L1qd8t0!( zdySmVd!st{bK%K{=9Rj&=Ffv)KX1|hFxkC)82{hg(&3(fkq6-NB>?O?0kGBtAd?QJ zm0$~|LIBLj0I*U5i1iA9XzK$|?dCuG2lOlFq=GX}9v}f{nuc(O=>uZH1yBw;!3bD_ zU{(i`gLA_m=mOLPjX+-zbO8W#QsA+O&>1m7Uxak_`<>>nu%o*kx!T2DqomQ{`*59GHMHWa@qZ7S~^!Kl)z@vEz7SZjuAWovinywxMoS2FN7 zEH|1t%4A}H?2754xrD_j%Moi{n>gE7_6iP##}7_;J59Lg5Ifz(-D^B~y{dc!eQ)?H z1`GsQ2d{)Cgfm98MOmHv9&;s5@6?xs(nO0hxa6LcxN|CLdl`M_GqP+i31t7w9nHU9 zkY40hVt!S*RG^%pl2DDR1@+)Ms)_U_Lks^c#r9*J-d)LeEAIFAEIl9{kQ}rbihXiz zxOZfJbZ?wtQtXx5l+ld&8>=~scSi5kK8P(dtn9DO{nh=s_)Emb(M`^+uiKA)7VrA) zEB#tO5ODlSVZM$P@WWh#2Fx+Iz|6u~m`%6|24UXdCqxG`1g0=2kOkd@#-Q&AR(P%P zMdTpvAy(jBM;jT2tUyk{D~~EF3{{U>K(nFk;T(JdLx-`&6l3PF0@xsI7Y>87!d2q7 z@J9GD{0|aKlAELyq`{in5#@A}YP&ZEYQ#XH-V)Gsvv6_^~14ao?j4lj=6k7|w9iW!UZJhhvUlPHq(FxfQ) zq?V>>q`%8dxgeZ1aw#H*HTOZjUjc35y<*QR6jwV-iRB~}tyPXS=-S45n}+?ysv9OZ zzqJ(K(rR1j$hs}xHG4PtzG(M&@2Lj@{VyISJQ5#z^W@U7{hV|l=i6Vte3RLV-yYuK+dKCw{z!laG%#N$3ABJM%p<0O zYA^skKqQbP%m$r-WBwLFh0ujLomRwONMWQ8vL5*f<`CmhgJ?Rm2f718hVj63W7)9r z*mpQXTq~XnpG|@xNg&xFjU_!Gq>|CVvs#J#1w}9=HDxE2J2egUAWZ`85!yYvKKcv> zJ4PYKJ*G+KW|m8=VQlv7TJY|}%00wyKDli~41a=UN19Bb{{JVSQ=?d&3H&&qviwE*<+| zre!9^?4cDF}{Txa*#Kx+jZQvyZXwvVVG@WYFu7)G)>HwaCho zPBE;pGpDX4cqED@Z6)`nTsY^LE}F4-ek7|Lj+#LpTmF}Vfuf?4z^j_2v}GSEI;v7@ ztn0YySFg7=Mcq_r{?^*qM(m*I?Cd&z=li|$-7G!jeOwO;25=992SX5MzsmCeV$vtN*Wk9q%cvGzm6 zlGZYQ`Nc~9M~79`)tR-DzwAEIeH!_EZe4SI`^$~5?i-97Prt=)N^Q<3ePg@o zht*Hi&(|HuI*eO3a z*sFk(4fq>KkN@xQ6^F(cm~$_2K14li9;XkV|9<@!M&f%8Nam8p00009a7bBm000XU z000XU0RWnu7ytkil}SWFRCodHT?u#;Rkr@KbUNvfeG_5`YY-wNfPp{+o{ADgGcxep z5O;8ydCWk3pWowCbe1RjK4lzy;4&jKqk}U-a1=+ud7z@;LLwlFC>S)v1jwFrI_XY2 zop;WyuIf%_F~x?x|CCgE~7q5lBOq0>MKUdH^|7ARquk zTn+*P5DlHMG@8ELxbaVWHf?&T znHpfF&E_pZ&^rD;1;7qozi0Q$(`V)7{8<+kI>wdbHk%E>!9AN2eO+^{$KB)hHtVU6 z4;0@%KYw`%{kM%aj|)L>`1``u*EM%B_Ep|f_7iHT~t6&rZsneaT;XVt##n z3*O&%0=#!k4Gq$@x_XoAC663)d$?Wm=UXTrha?_sgD)BZa!4dhf)W5g$)o+5f!@!6p= z7>#E6lGpa0z~7?)*juclePn!mT$U>W2F?VqT7?}(LqHHhL#3+DoNXk5_#Pb{(lwSP zZ<=X|iSbjYeFoatR`H}3=!RdX3qeSTbc>FTPC&5WKoW3vT<}n4p!jve)Qtntp05&Y$`N~L&mauhNrjZlt#E%Rdnz*4RdA(~WsS0P~4Cker*^h9K3rID79 zAhx!)2_f*-6tD+E@|~5o_HbR*DQEm#fix64W;xPOIEsuwz3>ej`Mg}wlx+M?%^s;7 zt7<_1|D+24j|zb6{d*Duo)R*nQ%A&N`m}UK6}Gim#oV|jr-^I5{&3u6Y!z0&JjK=N zf~iA{0UNr_&1RH*=FkdaRxmwXu@ih1pW6b!KwO1@&&hNBf0 z=VYU~zns|bF>|Ig{pE8Oi&e4q8Sf>;d>$HnJ*g4^2E{@!BWJXj|MK2>t{)#4iCiKM z_X3_Wd3!22SVWGECF_5t9Wx1ebdVe1IRabo*K&Me+mp(08G`jsI~A7O*rz=A?*I(Ym_y4*ZBHj<`2EIL z@XCfeuGtW8G6RGFlFM<@CjE-OtU#5a;0kB%yXw(N%<3n(~sBeG(H{~)Y9EAyo%kT#Rg2j zpdOnacnjrpoDswQL%S&=xD)LJZ^c?^7~tUKxVSW2U-+UJ`I8c2{Q|sd4FLUcTr-0M zaqMa26wFKpz7U~s3AlNV^qhrHMbm9<`9gTLcVV_VCkYcW$bp+1aV?*4j`n;5NQvl5P$NHC1)DVqF ze?14Uta}S5dTDmrRR#Fn;tPAZ>c6M&cw`%zt17X5(`x+mXPZPMYENh$xHA{IIn#Q& z^ zG}YF_5*3HIuofIEDMeLB1jc8M#;C+D(d52>)gx`#@~i9ZqkAV_+e~x*&R~QFvHtHw zX=O8P?QIyJ9Ss9*B|&g;0hMp z3Alm-uHb+xn7Ts16&!E{`__2XkJh+p1UhOAxPk+&;D9SQ;0g}7f`^~4p*Mp`Hum_uHM8Ep9TllPO>m-^Cs zpVwg1bK6i`-w1z*2vDs7WXVaJJHyU=rk@Vk3#W^iKzdl}7D4^3u#E2B8*>%rGlt8u z5=Bg)^vMF>N2OW-kTeo=C=#;#Uwg6hiz=At%UPznGuZL$9uX3jIcgXzEoL+}ne7De zePX!NLIZ__1sfvpaY5fTR( zUH5HKQ7-^w@TCk-ATqS$+;^2Y-9Yg{p~En8>~LcE&~OCN2SO-y!qgT7qsff0kWR!$ z^D81!lBm$TfXL;}=Y9YJK+SF{!{d*=}ZDsk}pA}{0WdF3_)n|T5 
[git binary patch for docs/haddock/tensorflow-core-ops-0.1.0.0/synopsis.png omitted]

diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt b/docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt
new file mode 100644
index 0000000..a68f2c8
--- /dev/null
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt
@@ -0,0 +1,4576 @@
+-- Hoogle documentation, generated by Haddock
+-- See Hoogle, http://www.haskell.org/hoogle/
+
+
+-- | Haskell wrappers for Core Tensorflow Ops.
+--
+-- Code generated signatures for the Ops in libtensorflow_c.
+@package tensorflow-core-ops
+@version 0.1.0.0
+
+module TensorFlow.GenOps.Core
+
+-- | Receives the named tensor from send_device on recv_device.
+--
+-- _HostRecv requires its input on host memory whereas _Recv requires its
+-- input on device memory.
+_HostRecv :: (TensorType tensor_type) => Int64 -> Tensor Value tensor_type
+
+-- | Receives the named tensor from send_device on recv_device.
+_Recv :: (TensorType tensor_type) => Int64 -> Tensor Value tensor_type
+
+-- | Sends the named tensor from send_device to recv_device.
+_Send :: (TensorType t) => Int64 -> Tensor v1 t -> ControlNode
+
+-- | A graph node which represents an argument to a function.
+_Arg :: (TensorType t) => Int64 -> Tensor Value t
+
+-- | Update '*var' according to the RMSProp algorithm.
+-- +-- Note that in dense implement of this algorithm, ms and mom will update +-- even if the grad is zero, but in this sparse implement, ms and mom +-- will not update in iterations the grad is zero. +-- +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = +-- learning_rate * gradient / sqrt(mean_square + epsilon) +-- +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * +-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom +sparseApplyRMSProp :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor v9 tindices -> Tensor Value t + +-- | Update '*var' according to the Adam algorithm. +-- +-- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- +-- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - +-- beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + +-- epsilon) +applyAdam :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor v9 t -> Tensor v10 t -> Tensor Value t + +-- | Update relevant entries in '*var' and '*accum' according to the +-- momentum scheme. +-- +-- Set use_nesterov = True if you want to use Nesterov momentum. +-- +-- That is for rows we have grad for, we update var and accum as follows: +-- +-- accum = accum * momentum + grad var -= lr * accum +sparseApplyMomentum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 tindices -> Tensor v6 t -> Tensor Value t + +-- | Update '*var' according to the momentum scheme. Set use_nesterov = +-- True if you +-- +-- want to use Nesterov momentum. +-- +-- accum = accum * momentum + grad var -= lr * accum +applyMomentum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor Value t + +-- | Update '*var' according to the Ftrl-proximal scheme. +-- +-- accum_new = accum + grad * grad linear += grad + +-- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 +-- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - +-- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new +applyFtrl :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor Value t + +-- | Update entries in '*var' and '*accum' according to the proximal +-- adagrad scheme. 
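These update rules are plain arithmetic, so they can be checked outside the graph. Below is a pure-Haskell sketch of one scalar Adam step, transcribing the applyAdam formulas above; every name is local to the example and nothing in it is part of the generated module.

```haskell
-- One Adam step for a single scalar parameter (assumes t >= 1):
--   lr_t <- lr * sqrt(1 - beta2^t) / (1 - beta1^t)
--   m_t  <- beta1 * m + (1 - beta1) * g
--   v_t  <- beta2 * v + (1 - beta2) * g * g
--   var  <- var - lr_t * m_t / (sqrt v_t + epsilon)
adamStep :: Double -> Double -> Double -> Double  -- lr, beta1, beta2, epsilon
         -> Int                                   -- timestep t
         -> Double                                -- gradient g_t
         -> (Double, Double, Double)              -- (var, m, v) before
         -> (Double, Double, Double)              -- (var, m, v) after
adamStep lr beta1 beta2 eps t g (var, m, v) = (var', m', v')
  where
    lrT  = lr * sqrt (1 - beta2 ^^ t) / (1 - beta1 ^^ t)
    m'   = beta1 * m + (1 - beta1) * g
    v'   = beta2 * v + (1 - beta2) * g * g
    var' = var - lrT * m' / (sqrt v' + eps)
```

The generated signature for the sparse proximal-Adagrad update described just above follows.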
+sparseApplyAdagradDA :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 tindices -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor v9 Int64 -> Tensor Value t + +-- | Update relevant entries in '*var' and '*accum' according to the +-- adagrad scheme. +-- +-- That is for rows we have grad for, we update var and accum as follows: +-- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) +sparseApplyAdagrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 tindices -> Tensor Value t + +-- | Update '*var' and '*accum' according to FOBOS with Adagrad learning +-- rate. +-- +-- accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var +-- = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} +applyProximalAdagrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor Value t + +-- | Update '*var' according to the adagrad scheme. +-- +-- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) +applyAdagrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor Value t + +-- | Update '*var' according to the adadelta scheme. +-- +-- accum = rho() * accum + (1 - rho()) * grad.square(); update = +-- (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; +-- update_accum = rho() * update_accum + (1 - rho()) * update.square(); +-- var -= update; +applyAdadelta :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor Value t + +-- | Sparse update '*var' as FOBOS algorithm with fixed learning rate. +-- +-- That is for rows we have grad for, we update var as follows: prox_v = +-- var - alpha * grad var = sign(prox_v)/(1+alpha*l2) * +-- max{|prox_v|-alpha*l1,0} +sparseApplyProximalGradientDescent :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 tindices -> Tensor Value t + +-- | Update '*var' as FOBOS algorithm with fixed learning rate. +-- +-- prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * +-- max{|prox_v|-alpha*l1,0} +applyProximalGradientDescent :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor Value t + +-- | Encode strings into web-safe base64 format. +-- +-- Refer to the following article for more information on base64 format: +-- en.wikipedia.orgwikiBase64. Base64 strings may have padding +-- with '=' at the end so that the encoded has length multiple of 4. See +-- Padding section of the link above. 
+--
+-- Web-safe means that the encoder uses - and _ instead of + and /.
+encodeBase64 :: Tensor v1 ByteString -> Tensor Value ByteString
+
+-- | Split elements of input based on delimiter into a
+-- SparseTensor.
+--
+-- Let N be the size of source (typically N will be the batch size).
+-- Split each element of input based on delimiter and
+-- return a SparseTensor containing the split tokens. Empty
+-- tokens are ignored.
+--
+-- delimiter can be empty or a single character. If
+-- delimiter is an empty string, each element of input
+-- is split into individual 1-character strings.
+--
+-- For example: N = 2, input[0] is 'hello world' and input[1] is 'a b c',
+-- then the output will be
+--
+-- indices = [0, 0; 0, 1; 1, 0; 1, 1; 1, 2] shape = [2, 3] values =
+-- [hello, world, a, b, c]
+stringSplit :: Tensor v1 ByteString -> Tensor v2 ByteString -> (Tensor Value Int64, Tensor Value ByteString, Tensor Value Int64)
+
+-- | Joins the strings in the given list of string tensors into one
+-- tensor, with the given separator (default is an empty separator).
+stringJoin :: [Tensor v1 ByteString] -> Tensor Value ByteString
+
+-- | Converts each entry in the given tensor to strings. Supports many
+-- numeric types and boolean.
+asString :: (TensorType t, OneOf '[Complex Float, Bool, Int32, Int64, Int8, Double, Float] t) => Tensor v1 t -> Tensor Value ByteString
+
+-- | Converts each string in the input Tensor to its hash mod by a number
+-- of buckets.
+--
+-- The hash function is deterministic on the content of the string within
+-- the process. The hash function is a keyed hash function, where
+-- attribute key defines the key of the hash function.
+-- key is an array of 2 elements.
+--
+-- A strong hash is important when inputs may be malicious, e.g. URLs
+-- with additional components. Adversaries could try to make their inputs
+-- hash to the same bucket for a denial-of-service attack or to skew the
+-- results. A strong hash prevents this by making it difficult, if not
+-- infeasible, to compute inputs that hash to the same bucket. This comes
+-- at a cost of roughly 4x higher compute time than
+-- tf.string_to_hash_bucket_fast.
+stringToHashBucketStrong :: Int64 -> Tensor v1 ByteString -> Tensor Value Int64
+
+-- | Multiplies sparse updates into a variable reference.
+--
+-- This operation computes
+--
+-- # Scalar indices ref[indices, ...] *= updates[...]
+--
+-- # Vector indices (for each i) ref[indices[i], ...] *= updates[i, ...]
+--
+-- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...]
+-- *= updates[i, ..., j, ...]
+--
+-- This operation outputs ref after the update is done. This
+-- makes it easier to chain operations that need to use the reset value.
+--
+-- Duplicate entries are handled correctly: if multiple indices
+-- reference the same location, their contributions multiply.
+--
+-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
+scatterMul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor v3 t -> Tensor Value t
+
+-- | Joins a string Tensor across the given dimensions.
+--
+-- Computes the string join across dimensions in the given string Tensor
+-- of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by
+-- joining the input strings with the given separator (default: empty
+-- string).
Negative indices are counted backwards from the end, with +-- `-1` being equivalent to `n - 1`. Passing an empty +-- reduction_indices joins all strings in linear index order and +-- outputs a scalar string. +-- +-- For example: +-- +-- ``` # tensor a is [["a", "b"], ["c", "d"]] tf.reduce_join(a, +-- 0) ==> ["ac", "bd"] tf.reduce_join(a, 1) ==> ["ab", "cd"] +-- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] +-- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] +-- tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] +-- tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] +-- tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] +-- tf.reduce_join(a, [0, 1]) ==> ["acbd"] tf.reduce_join(a, [1, 0]) +-- ==> ["abcd"] tf.reduce_join(a, []) ==> ["abcd"] ``` +reduceJoin :: Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor Value ByteString + +-- | Subtracts sparse updates to a variable reference. +-- +-- # Scalar indices ref[indices, ...] -= updates[...] +-- +-- # Vector indices (for each i) ref[indices[i], ...] -= updates[i, ...] +-- +-- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] +-- -= updates[i, ..., j, ...] +-- +-- This operation outputs ref after the update is done. This +-- makes it easier to chain operations that need to use the reset value. +-- +-- Duplicate entries are handled correctly: if multiple indices +-- reference the same location, their (negated) contributions add. +-- +-- Requires `updates.shape = indices.shape + ref.shape[1:]`. +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/ScatterSub.png" alt /div +scatterSub :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor v3 t -> Tensor Value t + +-- | Adds sparse updates to a variable reference. +-- +-- This operation computes +-- +-- # Scalar indices ref[indices, ...] += updates[...] +-- +-- # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...] +-- +-- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] +-- += updates[i, ..., j, ...] +-- +-- This operation outputs ref after the update is done. This +-- makes it easier to chain operations that need to use the reset value. +-- +-- Duplicate entries are handled correctly: if multiple indices +-- reference the same location, their contributions add. +-- +-- Requires `updates.shape = indices.shape + ref.shape[1:]`. +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/ScatterAdd.png" alt /div +scatterAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor v3 t -> Tensor Value t + +-- | Applies sparse updates to a variable reference. +-- +-- This operation computes +-- +-- # Scalar indices ref[indices, ...] = updates[...] +-- +-- # Vector indices (for each i) ref[indices[i], ...] = updates[i, ...] +-- +-- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] +-- = updates[i, ..., j, ...] +-- +-- This operation outputs ref after the update is done. This +-- makes it easier to chain operations that need to use the reset value. 
+--
+-- If values in ref are to be updated more than once, because
+-- there are duplicate entries in indices, the order in which
+-- the updates happen for each value is undefined.
+--
+-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
+--
+-- (See the diagram ../../images/ScatterUpdate.png in the rendered docs.)
+scatterUpdate :: (TensorType t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor v3 t -> Tensor Value t
+
+-- | Update ref by subtracting value from it.
+--
+-- This operation outputs "ref" after the update is done. This makes it
+-- easier to chain operations that need to use the reset value.
+assignSub :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | Update ref by adding value to it.
+--
+-- This operation outputs "ref" after the update is done. This makes it
+-- easier to chain operations that need to use the reset value.
+assignAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | Computes gradients for SparseSegmentMean.
+--
+-- Returns tensor "output" with same shape as grad, except for dimension
+-- 0 whose value is output_dim0.
+sparseSegmentMeanGrad :: (TensorType t, OneOf '[Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor v3 Int32 -> Tensor v4 Int32 -> Tensor Value t
+
+-- | Applies softmax to a batched N-D SparseTensor.
+--
+-- The inputs represent an N-D SparseTensor with logical shape `[..., B,
+-- C]` (where `N >= 2`), and with indices sorted in the canonical
+-- lexicographic order.
+--
+-- This op is equivalent to applying the normal `tf.nn.softmax()` to each
+-- innermost logical submatrix with shape `[B, C]`, but with the catch
+-- that *the implicitly zero elements do not participate*. Specifically,
+-- the algorithm is equivalent to the following:
+--
+-- 1. Applies `tf.nn.softmax()` to a densified view of each innermost
+--    submatrix with shape `[B, C]`, along the size-C dimension;
+-- 2. Masks out the original implicitly-zero locations;
+-- 3. Renormalizes the remaining elements.
    +-- +-- Hence, the SparseTensor result has exactly the same non-zero +-- indices and shape. +sparseSoftmax :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor Value t + +-- | Solves systems of linear equations. +-- +-- Matrix is a tensor of shape `[..., M, M]` whose inner-most 2 +-- dimensions form square matrices. Rhs is a tensor of shape +-- `[..., M, K]`. The output is a tensor shape `[..., M, K]`. If +-- adjoint is False then each output matrix satisfies +-- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If +-- adjoint is True then each output matrix satisfies +-- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. +matrixSolve :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes the eigen decomposition of one or more square self-adjoint +-- matrices. +-- +-- Computes the eigenvalues and (optionally) eigenvectors of each inner +-- matrix in input such that `input[..., :, :] = v[..., :, :] * +-- diag(e[..., :])`. +-- +-- ```prettyprint # a is a tensor. # e is a tensor of eigenvalues. # v is +-- a tensor of eigenvectors. e, v = self_adjoint_eig(a) e = +-- self_adjoint_eig(a, compute_v=False) ``` +selfAdjointEigV2 :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value t) + +-- | Computes the Eigen Decomposition of a batch of square self-adjoint +-- matrices. +-- +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 +-- dimensions form square matrices, with the same constraints as the +-- single matrix SelfAdjointEig. +-- +-- The result is a [..., M+1, M] matrix with [..., 0,:] containing the +-- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. +selfAdjointEig :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Update '*var' by subtracting alpha * delta from it. +applyGradientDescent :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t + +-- | Push an element onto the stack. +stackPush :: (TensorType t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor Value t + +-- | Computes the Cholesky decomposition of one or more square matrices. +-- +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 +-- dimensions form square matrices, with the same constraints as the +-- single matrix Cholesky decomposition above. The output is a tensor of +-- the same shape as the input containing the Cholesky decompositions for +-- all input submatrices `[..., :, :]`. +cholesky :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Interleave the values from the `data` tensors into a single tensor. +-- +-- Builds a merged tensor such that +-- +-- merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] +-- +-- For example, if each `indices[m]` is scalar or vector, we have +-- +-- # Scalar indices merged[indices[m], ...] = data[m][...] +-- +-- # Vector indices merged[indices[m][i], ...] = data[m][i, ...] +-- +-- Each `data[i].shape` must start with the corresponding +-- `indices[i].shape`, and the rest of `data[i].shape` must be constant +-- w.r.t. i. That is, we must have `data[i].shape = +-- indices[i].shape + constant`. 
In terms of this constant, the +-- output shape is +-- +-- merged.shape = [max(indices)] + constant +-- +-- Values are merged in order, so if an index appears in both +-- `indices[m][i]` and `indices[n][j]` for `(m,i) < (n,j)` the slice +-- `data[n][j]` will appear in the merged result. +-- +-- For example: +-- +-- indices[0] = 6 indices[1] = [4, 1] indices[2] = [[5, 2], [0, 3]] +-- data[0] = [61, 62] data[1] = [[41, 42], [11, 12]] data[2] = [[[51, +-- 52], [21, 22]], [[1, 2], [31, 32]]] merged = [[1, 2], [11, 12], [21, +-- 22], [31, 32], [41, 42], [51, 52], [61, 62]] +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/DynamicStitch.png" alt /div +dynamicStitch :: (TensorType t) => [Tensor v1 Int32] -> [Tensor v2 t] -> Tensor Value t + +-- | Returns the number of work units this Reader has finished processing. +readerNumWorkUnitsCompleted :: Tensor v1 ByteString -> Tensor Value Int64 + +-- | Returns the next record (key, value pair) produced by a Reader. +-- +-- Will dequeue from the input queue if necessary (e.g. when the Reader +-- needs to start reading from a new file since it has finished with the +-- previous file). +readerRead :: Tensor v1 ByteString -> Tensor v2 ByteString -> (Tensor Value ByteString, Tensor Value ByteString) + +-- | Compute the 2-dimensional discrete Fourier Transform over the +-- inner-most +-- +-- 2 dimensions of input. +fFT2D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) + +-- | A Reader that outputs fixed-length records from a file. +fixedLengthRecordReader :: Int64 -> Tensor Value ByteString + +-- | A placeholder op for a value that will be fed into the computation. +-- +-- N.B. This operation will fail with an error if it is executed. It is +-- intended as a way to represent a value that will always be fed, and to +-- provide attrs that enable the fed value to be checked at runtime. +placeholder :: (TensorType dtype) => Tensor Value dtype + +-- | Outputs a Summary protocol buffer with scalar values. +-- +-- The input tags and values must have the same shape. +-- The generated summary has a summary value for each tag-value pair in +-- tags and values. +scalarSummary :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor Value ByteString + +-- | Computes softmax activations. +-- +-- For each batch i and class j we have +-- +-- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j])) +softmax :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Generate a sharded filename. The filename is printf formatted as +-- +-- %s-%05d-of-%05d, basename, shard, num_shards. +shardedFilename :: Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor v3 Int32 -> Tensor Value ByteString + +-- | Sends the named tensor from send_device to recv_device. +-- +-- _HostSend requires its input on host memory whereas _Send requires its +-- input on device memory. +_HostSend :: (TensorType t) => Int64 -> Tensor v1 t -> ControlNode + +-- | Computes the gradient of the sigmoid of x wrt its input. +-- +-- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and +-- dy is the corresponding input gradient. 
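Because the rule is closed form, a pure reference version fits in two definitions; a local sketch, not the op itself:

```haskell
-- Reference for SigmoidGrad: with y = sigmoid x, the derivative is
-- y * (1 - y), so the backward value is dy * y * (1 - y).
sigmoid :: Double -> Double
sigmoid x = 1 / (1 + exp (negate x))

sigmoidGradRef :: Double  -- ^ y, the forward result
               -> Double  -- ^ dy, the incoming gradient
               -> Double
sigmoidGradRef y dy = dy * y * (1 - y)
```

Taking y rather than x lets the backward pass reuse the forward activation instead of recomputing the exponential, which is why the generated wrapper below takes the sigmoid's output as its first argument.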
+sigmoidGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Greedily selects a subset of bounding boxes in descending order of +-- score, +-- +-- pruning away boxes that have high intersection-over-union (IOU) +-- overlap with previously selected boxes. Bounding boxes are supplied as +-- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of +-- any diagonal pair of box corners and the coordinates can be provided +-- as normalized (i.e., lying in the interval [0, 1]) or absolute. Note +-- that this algorithm is agnostic to where the origin is in the +-- coordinate system. Note that this algorithm is invariant to orthogonal +-- transformations and translations of the coordinate system; thus +-- translating or reflections of the coordinate system result in the same +-- boxes being selected by the algorithm. +-- +-- The output of this operation is a set of integers indexing into the +-- input collection of bounding boxes representing the selected boxes. +-- The bounding box coordinates corresponding to the selected indices can +-- then be obtained using the tf.gather operation. For example: +-- +-- selected_indices = tf.image.non_max_suppression( boxes, scores, +-- max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, +-- selected_indices) +nonMaxSuppression :: Tensor v1 Float -> Tensor v2 Float -> Tensor v3 Int32 -> Tensor Value Int32 + +-- | A Reader that outputs the queued work as both the key and value. +-- +-- To use, enqueue strings in a Queue. ReaderRead will take the front +-- work string and output (work, work). +identityReader :: Tensor Value ByteString + +-- | Extracts a glimpse from the input tensor. +-- +-- Returns a set of windows called glimpses extracted at location +-- offsets from the input tensor. If the windows only partially +-- overlaps the inputs, the non overlapping areas will be filled with +-- random noise. +-- +-- The result is a 4-D tensor of shape `[batch_size, glimpse_height, +-- glimpse_width, channels]`. The channels and batch dimensions are the +-- same as that of the input tensor. The height and width of the output +-- windows are specified in the size parameter. +-- +-- The argument normalized and centered controls how +-- the windows are built: +-- +--
+-- * If the coordinates are normalized but not centered, 0.0 and 1.0
+--   correspond to the minimum and maximum of each height and width
+--   dimension.
+-- * If the coordinates are both normalized and centered, they range
+--   from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the
+--   upper left corner, the lower right corner is located at (1.0, 1.0)
+--   and the center is at (0, 0).
+-- * If the coordinates are not normalized they are interpreted as
+--   numbers of pixels.
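A one-axis sketch of these conventions, mapping an offset to a pixel coordinate (local illustration only; the op applies this per axis internally):

```haskell
-- Map a glimpse offset to a pixel coordinate along one axis of the
-- given length, following the normalized/centered rules listed above.
toPixel :: Bool    -- ^ normalized?
        -> Bool    -- ^ centered?
        -> Double  -- ^ axis length in pixels
        -> Double  -- ^ offset along this axis
        -> Double
toPixel normalized centered size offset
  | normalized && centered = (offset + 1) / 2 * size  -- [-1,1] -> [0,size]
  | normalized             = offset * size            -- [0,1]  -> [0,size]
  | otherwise              = offset                   -- already pixels
```

The wrapper itself exposes only the tensors: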
    +extractGlimpse :: Tensor v1 Float -> Tensor v2 Int32 -> Tensor v3 Float -> Tensor Value Float + +-- | Computes the gradients of 3-D convolution with respect to the input. +conv3DBackpropInput :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t + +-- | Solves one or more linear least-squares problems. +-- +-- matrix is a tensor of shape `[..., M, N]` whose inner-most 2 +-- dimensions form matrices of size `[M, N]`. Rhs is a tensor of shape +-- `[..., M, K]`. The output is a tensor shape `[..., N, K]` where each +-- output matrix solves each of the equations matrix[..., :, :] * +-- output[..., :, :] = rhs[..., :, :] in the least squares sense. +-- +-- matrix and right-hand sides in the batch: +-- +-- matrix=\(A in Re^{m times n}\), rhs=\(B in Re^{m +-- times k}\), output=\(X in Re^{n times k}\), +-- l2_regularizer=\(lambda\). +-- +-- If fast is True, then the solution is computed by +-- solving the normal equations using Cholesky decomposition. +-- Specifically, if \(m ge n\) then \(X = (A^T A + lambda I)^{-1} A^T +-- B\), which solves the least-squares problem \(X = mathrm{argmin}_{Z in +-- Re^{n times k}} ||A Z - B||_F^2 + lambda ||Z||_F^2\). If \(m lt n\) +-- then output is computed as \(X = A^T (A A^T + lambda I)^{-1} +-- B\), which (for \(lambda = 0\)) is the minimum-norm solution to the +-- under-determined linear system, i.e. \(X = mathrm{argmin}_{Z in Re^{n +-- times k}} ||Z||_F^2 \), subject to \(A Z = B\). Notice that the fast +-- path is only numerically stable when \(A\) is numerically full rank +-- and has a condition number \(mathrm{cond}(A) lt +-- frac{1}{sqrt{epsilon_{mach}}}\) or\(lambda\) is sufficiently large. +-- +-- If fast is False an algorithm based on the numerically +-- robust complete orthogonal decomposition is used. This computes the +-- minimum-norm least-squares solution, even when \(A\) is rank +-- deficient. This path is typically 6-7 times slower than the fast path. +-- If fast is False then l2_regularizer is +-- ignored. +matrixSolveLs :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 Double -> Tensor Value t + +-- | Converts one or more images from RGB to HSV. +-- +-- Outputs a tensor of the same shape as the images tensor, +-- containing the HSV value of the pixels. The output is only well +-- defined if the value in images are in `[0,1]`. +-- +-- `output[..., 0]` contains hue, `output[..., 1]` contains saturation, +-- and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A +-- hue of 0 corresponds to pure red, hue 13 is pure green, and 23 +-- is pure blue. +rGBToHSV :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Decode the first frame of a GIF-encoded image to a uint8 tensor. +-- +-- GIF with frame or transparency compression are not supported convert +-- animated GIF from compressed to uncompressed by: +-- +-- convert $src.gif -coalesce $dst.gif +decodeGif :: Tensor v1 ByteString -> Tensor Value Word8 + +-- | Deprecated. Disallowed in GraphDef version >= 2. +adjustContrast :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Float -> Tensor v3 Float -> Tensor v4 Float -> Tensor Value Float + +-- | DepthToSpace for tensors of type T. +-- +-- Rearranges data from depth into blocks of spatial data. This is the +-- reverse transformation of SpaceToDepth. 
More specifically, this op +-- outputs a copy of the input tensor where values from the +-- depth dimension are moved in spatial blocks to the +-- height and width dimensions. The attr +-- block_size indicates the input block size and how the data is +-- moved. +-- +--
+-- * Chunks of data of size `block_size * block_size` from depth are
+--   rearranged into non-overlapping blocks of size `block_size x
+--   block_size`.
+-- * The width of the output tensor is `input_width * block_size`,
+--   whereas the height is `input_height * block_size`.
+-- * The depth of the input tensor must be divisible by `block_size *
+--   block_size`.
    +-- +-- That is, assuming the input is in the shape: `[batch, height, width, +-- depth]`, the shape of the output will be: `[batch, height*block_size, +-- width*block_size, depth/(block_size*block_size)]` +-- +-- This operation requires that the input tensor be of rank 4, and that +-- block_size be >=1 and that `block_size * block_size` be a +-- divisor of the input depth. +-- +-- This operation is useful for resizing the activations between +-- convolutions (but keeping all data), e.g. instead of pooling. It is +-- also useful for training purely convolutional models. +-- +-- For example, given this input of shape `[1, 1, 1, 4]`, and a block +-- size of 2: +-- +-- ```prettyprint x = [[[[1, 2, 3, 4]]]] +-- +-- ``` +-- +-- This operation will output a tensor of shape `[1, 2, 2, 1]`: +-- +-- ```prettyprint [[[[1], [2]], [[3], [4]]]] ``` +-- +-- Here, the input has a batch of 1 and each batch element has shape `[1, +-- 1, 4]`, the corresponding output will have 2x2 elements and will have +-- a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output +-- element shape is `[2, 2, 1]`. +-- +-- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, +-- e.g. +-- +-- ```prettyprint x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ``` +-- +-- This operation, for block size of 2, will return the following tensor +-- of shape `[1, 2, 2, 3]` +-- +-- ```prettyprint [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] +-- +-- ``` +-- +-- Similarly, for the following input of shape `[1 2 2 4]`, and a block +-- size of 2: +-- +-- ```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], +-- [13, 14, 15, 16]]]] ``` +-- +-- the operator will return the following tensor of shape `[1 4 4 1]`: +-- +-- ```prettyprint x = [[ [1], [2], [5], [6]], [ [3], [4], [7], [8]], [ +-- [9], [10], [13], [14]], [ [11], [12], [15], [16]]] +-- +-- ``` +depthToSpace :: (TensorType t) => Int64 -> Tensor v1 t -> Tensor Value t +batchMatrixSolve :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes the complementary error function of x element-wise. +erfc :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Computes the gradient of bilinear interpolation. +resizeBilinearGrad :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 Float -> Tensor v2 t -> Tensor Value t + +-- | Output a fact about factorials. +fact :: Tensor Value ByteString + +-- | Delete the tensor specified by its handle in the session. +deleteSessionTensor :: Tensor v1 ByteString -> ControlNode + +-- | Returns the truth value of x OR y element-wise. +-- +--
+-- *NOTE*: LogicalOr supports broadcasting. More about broadcasting
+-- here.
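A hedged usage sketch: `constant` is assumed to come from the companion tensorflow-ops package's TensorFlow.Ops module, and `Shape`, `Tensor`, and `Value` from the base tensorflow package; none of those helpers are defined in this generated file.

```haskell
import TensorFlow.GenOps.Core (logicalOr)
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))
import qualified TensorFlow.Ops as TF  -- assumed: constant

-- Element-wise OR of two boolean vectors; with broadcasting, either
-- argument could instead be a scalar that is stretched to match.
orBoth :: Tensor Value Bool
orBoth = logicalOr (TF.constant (Shape [3]) [True, False, False])
                   (TF.constant (Shape [3]) [False, False, True])
```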
    +logicalOr :: Tensor v1 Bool -> Tensor v2 Bool -> Tensor Value Bool + +-- | Get the value of the tensor specified by its handle. +getSessionTensor :: (TensorType dtype) => Tensor v1 ByteString -> Tensor Value dtype +batchMatrixInverse :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Generate a glob pattern matching all sharded file names. +shardedFilespec :: Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor Value ByteString + +-- | Decode web-safe base64-encoded strings. +-- +-- Input may or may not have padding at the end. See EncodeBase64 for +-- padding. Web-safe means that input must use - and _ instead of + and +-- /. +decodeBase64 :: Tensor v1 ByteString -> Tensor Value ByteString + +-- | Store the input tensor in the state of the current session. +getSessionHandle :: (TensorType t) => Tensor v1 t -> Tensor Value ByteString + +-- | Table initializer that takes two tensors for keys and values +-- respectively. +initializeTable :: (TensorType tkey, TensorType tval) => Tensor v1 ByteString -> Tensor v2 tkey -> Tensor v3 tval -> ControlNode + +-- | Computes tan of x element-wise. +tan :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Computes hyperbolic tangent of x element-wise. +tanh :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Update '*var' according to the proximal adagrad scheme. +applyAdagradDA :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 Int64 -> Tensor Value t + +-- | Converts each string in the input Tensor to its hash mod by a number +-- of buckets. +-- +-- The hash function is deterministic on the content of the string within +-- the process. +-- +-- Note that the hash function may change from time to time. This +-- functionality will be deprecated and it's recommended to use +-- `tf.string_to_hash_bucket_fast()` or +-- `tf.string_to_hash_bucket_strong()`. +stringToHashBucket :: Int64 -> Tensor v1 ByteString -> Tensor Value Int64 + +-- | Computes gradients for the exponential linear (Elu) operation. +eluGrad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes gradient of the FractionalAvgPool function. +-- +-- Unlike FractionalMaxPoolGrad, we don't need to find arg_max for +-- FractionalAvgPoolGrad, we just need to evenly back-propagate each +-- element of out_backprop to those indices that form the same pooling +-- cell. Therefore, we just need to know the shape of original input +-- tensor, instead of the whole tensor. +fractionalAvgPoolGrad :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int64 -> Tensor Value t + +-- | Solves systems of linear equations with upper or lower triangular +-- matrices by +-- +-- backsubstitution. +-- +-- matrix is a tensor of shape `[..., M, M]` whose inner-most 2 +-- dimensions form square matrices. If lower is True then +-- the strictly upper triangular part of each inner-most matrix is +-- assumed to be zero and not accessed. If lower is False then +-- the strictly lower triangular part of each inner-most matrix is +-- assumed to be zero and not accessed. 
rhs is a tensor of shape +-- `[..., M, K]`. +-- +-- The output is a tensor of shape `[..., M, K]`. If adjoint is +-- True then the innermost matrices in output` satisfy matrix +-- equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If +-- adjoint is False then the strictly then the innermost +-- matrices in output satisfy matrix equations +-- `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. +matrixTriangularSolve :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes the (possibly normalized) Levenshtein Edit Distance. +-- +-- The inputs are variable-length sequences provided by SparseTensors +-- (hypothesis_indices, hypothesis_values, hypothesis_shape) and +-- (truth_indices, truth_values, truth_shape). +-- +-- The inputs are: +editDistance :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int64 -> Tensor v5 t -> Tensor v6 Int64 -> Tensor Value Float + +-- | Computes the number of incomplete elements in the given barrier. +barrierIncompleteSize :: Tensor v1 ByteString -> Tensor Value Int32 + +-- | Generates labels for candidate sampling with a learned unigram +-- distribution. +-- +-- See explanations of candidate sampling and the data formats at +-- go/candidate-sampling. +-- +-- For each batch, this op picks a single set of sampled candidate +-- labels. +-- +-- The advantages of sampling candidates per-batch are simplicity and the +-- possibility of efficient dense matrix multiplication. The disadvantage +-- is that the sampled candidates must be chosen independently of the +-- context and of the true labels. +threadUnsafeUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) + +-- | Computes the number of complete elements in the given barrier. +barrierReadySize :: Tensor v1 ByteString -> Tensor Value Int32 + +-- | Closes the given barrier. +-- +-- This operation signals that no more new elements will be inserted in +-- the given barrier. Subsequent InsertMany that try to introduce a new +-- key will fail. Subsequent InsertMany operations that just add missing +-- components to already existing elements will continue to succeed. +-- Subsequent TakeMany operations will continue to succeed if sufficient +-- completed elements remain in the barrier. Subsequent TakeMany +-- operations that would block will fail immediately. +barrierClose :: Tensor v1 ByteString -> ControlNode + +-- | A Reader that outputs the lines of a file delimited by '\n'. +textLineReader :: Tensor Value ByteString + +-- | Compute the 3-dimensional discrete Fourier Transform over the +-- inner-most 3 +-- +-- dimensions of input. +fFT3D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) + +-- | Exits the current frame to its parent frame. +-- +-- Exit makes its input `data` available to the parent frame. +refExit :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Computes exponential of x element-wise. \(y = e^x\). +exp :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Restores a tensor from checkpoint files. +-- +-- This is like Restore except that restored tensor can be +-- listed as filling only a slice of a larger tensor. +-- shape_and_slice specifies the shape of the larger tensor and +-- the slice that the restored tensor covers. 
+-- +-- The shape_and_slice input has the same format as the elements +-- of the shapes_and_slices input of the SaveSlices op. +restoreSlice :: (TensorType dt) => Tensor v1 ByteString -> Tensor v2 ByteString -> Tensor v3 ByteString -> Tensor Value dt + +-- | Returns the complex conjugate of a complex number. +-- +-- Given a tensor input of complex numbers, this operation +-- returns a tensor of complex numbers that are the complex conjugate of +-- each element in input. The complex numbers in input +-- must be of the form \(a + bj\), where *a* is the real part and *b* is +-- the imaginary part. +-- +-- The complex conjugate returned by this operation is of the form \(a - +-- bj\). +-- +-- For example: +-- +-- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] +-- tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] ``` +conj :: (TensorType t, OneOf '[Complex Double, Complex Float] t) => Tensor v1 t -> Tensor Value t + +-- | Computes the gradient of nearest neighbor interpolation. +resizeNearestNeighborGrad :: (TensorType t, OneOf '[Int32, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t + +-- | Delete the TensorArray from its resource container. This enables +-- +-- the user to close and release the resource in the middle of a +-- step/run. +tensorArrayClose :: Tensor v1 ByteString -> ControlNode + +-- | Computes atan of x element-wise. +atan :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Get the current size of the TensorArray. +tensorArraySize :: Tensor v1 ByteString -> Tensor v2 Float -> Tensor Value Int32 + +-- | Concat the elements from the TensorArray into value value. +-- +-- Takes T elements of shapes +-- +-- ``` (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 +-- x ...) ``` +-- +-- and concatenates them into a Tensor of shape: +-- +-- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` +-- +-- All elements must have the same shape (excepting the first dimension). +tensorArrayConcat :: (TensorType dtype) => Tensor v1 ByteString -> Tensor v2 Float -> (Tensor Value dtype, Tensor Value Int64) + +-- | Local Response Normalization. +-- +-- The 4-D input tensor is treated as a 3-D array of 1-D vectors +-- (along the last dimension), and each vector is normalized +-- independently. Within a given vector, each component is divided by the +-- weighted, squared sum of inputs within depth_radius. In +-- detail, +-- +-- sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + +-- depth_radius + 1] ** 2) output = input / (bias + alpha * sqr_sum) ** +-- beta +-- +-- For details, see Krizhevsky et al., ImageNet classification with +-- deep convolutional neural networks (NIPS 2012). +lRN :: (TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Converts each string in the input Tensor to its hash mod by a number +-- of buckets. +-- +-- The hash function is deterministic on the content of the string within +-- the process and will never change. However, it is not suitable for +-- cryptography. This function may be used when CPU time is scarce and +-- inputs are trusted or unimportant. There is a risk of adversaries +-- constructing inputs that all hash to the same bucket. To prevent this +-- problem, use a strong hash function with +-- `tf.string_to_hash_bucket_strong`. +stringToHashBucketFast :: Int64 -> Tensor v1 ByteString -> Tensor Value Int64 + +-- | Pack the elements from the TensorArray into output value. +-- +--
+-- **WARNING: This op is deprecated.**
    +-- +-- Instead of this op, use TensorArrayGather with `indices = +-- RangeOp(0, TensorArraySizeOp)`. +-- +-- All elements must have the same shape. +tensorArrayPack :: (TensorType dtype) => Tensor v1 ByteString -> Tensor v2 Float -> Tensor Value dtype + +-- | Computes offsets of concat inputs within its output. +-- +-- For example: +-- +-- ```prettyprint # x is [2, 2, 7] # y is [2, 3, 7] # +-- z is [2, 5, 7] concat_offset(2, [x, y, z]) => [0, 0, 0], +-- [0, 2, 0], [0, 5, 0] ``` +concatOffset :: Tensor v1 Int32 -> [Tensor v2 Int32] -> [Tensor Value Int32] + +-- | Creates or finds a child frame, and makes `data` available to the +-- child frame. +-- +-- The unique frame_name is used by the Executor to +-- identify frames. If is_constant is true, output is a +-- constant in the child frame; otherwise it may be changed in the child +-- frame. At most parallel_iterations iterations are run in +-- parallel in the child frame. +refEnter :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Computes softsign: `features / (abs(features) + 1)`. +softsign :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Push an element onto the tensor_array. +tensorArrayWrite :: (TensorType t) => Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor v3 t -> Tensor v4 Float -> Tensor Value Float + +-- | Returns a diagonal tensor with a given diagonal values. +-- +-- Given a diagonal, this operation returns a tensor with the +-- diagonal and everything else padded with zeros. The diagonal +-- is computed as follows: +-- +-- Assume diagonal has dimensions [D1,..., Dk], then the output +-- is a tensor of rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: +-- +-- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 +-- everywhere else. +-- +-- For example: +-- +-- ```prettyprint # diagonal is [1, 2, 3, 4] tf.diag(diagonal) +-- ==> [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]] ``` +diag :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Returns the batched diagonal part of a batched tensor. +-- +-- This operation returns a tensor with the diagonal part of the +-- batched input. The diagonal part is computed as +-- follows: +-- +-- Assume input has k dimensions `[I, J, K, ..., N, +-- N]`, then the output is a tensor of rank `k - 1` with dimensions `[I, +-- J, K, ..., N]` where: +-- +-- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. +-- +-- The input must be at least a matrix. +-- +-- For example: +-- +-- ```prettyprint # input is [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, +-- 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, +-- 8]]] +-- +-- and input.shape = (2, 4, 4) +-- +-- tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] +-- +-- which has shape (2, 4) ``` +matrixDiagPart :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Computes the number of elements in the given queue. +queueSize :: Tensor v1 ByteString -> Tensor Value Int32 + +-- | Decode a PNG-encoded image to a uint8 or uint16 tensor. +-- +-- The attr channels indicates the desired number of color +-- channels for the decoded image. +-- +-- Accepted values are: +-- +--
+-- * 0: Use the number of channels in the PNG-encoded image.
+-- * 1: output a grayscale image.
+-- * 3: output an RGB image.
+-- * 4: output an RGBA image.
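A hedged end-to-end sketch, decoding a PNG from disk; `runSession`, `run`, and `constant` are assumed to come from the companion TensorFlow.Session and TensorFlow.Ops modules, not from this file, and the input path is hypothetical.

```haskell
import qualified Data.ByteString as BS
import Data.Word (Word8)
import TensorFlow.GenOps.Core (decodePng)
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))
import TensorFlow.Session (runSession, run)  -- assumed entry points
import qualified TensorFlow.Ops as TF        -- assumed: constant

-- Decode a PNG file into a flat vector of 8-bit channel values.
main :: IO ()
main = do
  png <- BS.readFile "image.png"  -- hypothetical input path
  pixels <- runSession $
    run (decodePng (TF.constant (Shape []) [png]) :: Tensor Value Word8)
  print pixels
```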
    +-- +-- If needed, the PNG-encoded image is transformed to match the requested +-- number of color channels. +decodePng :: (TensorType dtype, OneOf '[Word16, Word8] dtype) => Tensor v1 ByteString -> Tensor Value dtype + +-- | Returns element-wise smallest integer in not less than x. +ceil :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | A queue that produces elements sorted by the first component value. +-- +-- Note that the PriorityQueue requires the first component of any +-- element to be a scalar int64, in addition to the other elements +-- declared by component_types. Therefore calls to Enqueue and +-- EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will +-- all require (resp. output) one extra entry in their input (resp. +-- output) lists. +priorityQueue :: Tensor Value ByteString + +-- | A placeholder op that passes though input when its output is +-- not fed. +placeholderWithDefault :: (TensorType dtype) => Tensor v1 dtype -> Tensor Value dtype + +-- | Computes the gradient of the crop_and_resize op wrt the input image +-- tensor. +cropAndResizeGradImage :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 Float -> Tensor v2 Float -> Tensor v3 Int32 -> Tensor v4 Int32 -> Tensor Value t + +-- | Restore a Reader to its initial clean state. +readerReset :: Tensor v1 ByteString -> ControlNode + +-- | Extract patches from images and put them in the +-- "depth" output dimension. +extractImagePatches :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t +batchMatrixSetDiag :: (TensorType t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Delete the stack from its resource container. +stackClose :: Tensor v1 ByteString -> ControlNode + +-- | Quantizes then dequantizes a tensor. +-- +-- This op simulates the precision loss from the quantized forward pass +-- by: 1. Quantizing the tensor to fixed point numbers, which should +-- match the target quantization method when it is used in inference. 2. +-- Dequantizing it back to floating point numbers for the following ops, +-- most likely matmul. +-- +-- There are different ways to quantize. This version does not use the +-- full range of the output type, choosing to elide the lowest possible +-- value for symmetry (e.g., output range is -127 to 127, not -128 to 127 +-- for signed 8 bit quantization), so that 0.0 maps to 0. +-- +-- To perform this op, we first find the range of values in our tensor. +-- The range we use is always centered on 0, so we find m such that +-- +--
+-- 1. m = max(abs(input_min), abs(input_max)) if range_given is true,
+-- 2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
    +-- +-- Our input tensor range is then [-m, m]. +-- +-- Next, we choose our fixed-point quantization buckets, [min_fixed, +-- max_fixed]. If signed_input is true, this is +-- +--
+-- [min_fixed, max_fixed] =
+--   [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
    +-- +-- Otherwise, if signed_input is false, the fixed-point range is +-- +--
+-- [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
    +-- +-- From this we compute our scaling factor, s: +-- +-- s = (max_fixed - min_fixed) / (2 * m). +-- +-- Now we can quantize and dequantize the elements of our tensor. An +-- element e is transformed into e': +-- +-- e' = (e * s).round_to_nearest() / s. +-- +-- Note that we have a different number of buckets in the signed vs. +-- unsigned cases. For example, if num_bits == 8, we get 254 buckets in +-- the signed case vs. 255 in the unsigned case. +-- +-- For example, suppose num_bits = 8 and m = 1. Then +-- +--
+-- [min_fixed, max_fixed] = [-127, 127], and s = (127 + 127) / 2 = 127
+-- (a round trip of this scheme is sketched just below).
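The whole scheme fits in a few lines of ordinary Haskell. A local sketch of the signed case, not the op itself; note that Haskell's `round` breaks .5 ties to even, so exact ties can land one bucket away from the walk-through that follows.

```haskell
import Data.Bits (shiftL)

-- Signed quantize/dequantize round trip, following the derivation
-- above; assumes a non-empty input with a non-zero maximum magnitude.
quantizeDequantize :: Int -> [Double] -> [Double]
quantizeDequantize numBits xs = map roundTrip xs
  where
    m        = maximum (map abs xs)
    maxFixed = fromIntegral (1 `shiftL` (numBits - 1) :: Int) - 1
    s        = (maxFixed - negate maxFixed) / (2 * m)  -- = maxFixed / m
    roundTrip e = fromIntegral (round (e * s) :: Int) / s
```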
    +-- +-- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63, +-- 0, 38}, and dequantized to {-1, -63.0127, 0, 38.0127}. +quantizeAndDequantize :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Returns which elements of x are NaN. +isNan :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value Bool + +-- | Returns locations of true values in a boolean tensor. +-- +-- This operation returns the coordinates of true elements in +-- input. The coordinates are returned in a 2-D tensor where the +-- first dimension (rows) represents the number of true elements, and the +-- second dimension (columns) represents the coordinates of the true +-- elements. Keep in mind, the shape of the output tensor can vary +-- depending on how many true values there are in input. Indices +-- are output in row-major order. +-- +-- For example: +-- +-- ```prettyprint # input tensor is [[True, False] # [True, +-- False]] # input has two true values, so output has two +-- coordinates. # input has rank of 2, so coordinates have two +-- indices. where(input) ==> [[0, 0], [1, 0]] +-- +-- # input tensor is [[[True, False] # [True, False]] # [[False, +-- True] # [False, True]] # [[False, False] # [False, True]]] # +-- input has 5 true values, so output has 5 coordinates. # +-- input has rank of 3, so coordinates have three indices. +-- where(input) ==> [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, +-- 1, 1]] ``` +where' :: Tensor v1 Bool -> Tensor Value Int64 + +-- | Computes the difference between two lists of numbers or strings. +-- +-- Given a list x and a list y, this operation returns +-- a list out that represents all values that are in x +-- but not in y. The returned list out is sorted in the +-- same order that the numbers appear in x (duplicates are +-- preserved). This operation also returns a list idx that +-- represents the position of each out element in x. In +-- other words: +-- +-- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` +-- +-- For example, given this input: +-- +-- ```prettyprint x = [1, 2, 3, 4, 5, 6] y = [1, 3, 5] ``` +-- +-- This operation would return: +-- +-- ```prettyprint out ==> [2, 4, 6] idx ==> [1, 3, 5] ``` +listDiff :: (TensorType t, TensorType out_idx, OneOf '[Int32, Int64] out_idx) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value out_idx) + +-- | Return a strided slice from input. +-- +-- The output tensor is a tensor with dimensions implied by +-- begin, end, and strides, whose values are +-- extracted from begin. +-- +-- Specifically, the result tensor at index `(i[0], i[1], ..., i[n-1])` +-- will obtain the value `input[begin[0] + i[0] * stride[0], ..., ` +-- `begin[n-1] + i[n-1] * stride[n-1])]`. +-- +--
+-- *Requirements*: `0 != strides[i] for i in [0, n)`
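The indexing formula above has a direct one-dimensional reading; a local sketch (the non-zero-stride requirement keeps the enumeration finite):

```haskell
-- Output index i reads input position begin + i * stride; the end
-- bound is exclusive, and a negative stride walks backwards.
stridedSlice1D :: Int -> Int -> Int -> [a] -> [a]
stridedSlice1D begin end stride xs =
  [ xs !! i | i <- [begin, begin + stride .. end - signum stride] ]
```

For example, `stridedSlice1D 1 6 2 [0,10,20,30,40,50,60]` yields `[10,30,50]`. The generated n-dimensional wrapper follows.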
    +stridedSlice :: (TensorType index, OneOf '[Int32, Int64] index, TensorType t) => Tensor v1 t -> Tensor v2 index -> Tensor v3 index -> Tensor v4 index -> Tensor Value t + +-- | A queue that randomizes the order of elements. +randomShuffleQueue :: Tensor Value ByteString + +-- | Returns the gradient of Tile. +-- +-- Since Tile takes an input and repeats the input +-- multiples times along each dimension, TileGrad takes +-- in multiples and aggregates each repeated tile of +-- input into output. +tileGrad :: (TensorType t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t + +-- | Assign value to the sliced l-value reference of ref. +-- +-- The values of value are assigned to the positions in the +-- variable ref that are selected by the slice parameters. The +-- slice parameters `begin, end, strides, etc. work +-- exactly as in StridedSlice. +-- +-- NOTE this op currently does not support broadcasting and so +-- value's shape must be exactly the shape produced by the slice +-- of ref. +stridedSliceAssign :: (TensorType index, OneOf '[Int32, Int64] index, TensorType t) => Tensor v1 t -> Tensor v2 index -> Tensor v3 index -> Tensor v4 index -> Tensor v5 t -> Tensor Value t + +-- | Reshapes a tensor. +-- +-- Given tensor, this operation returns a tensor that has the +-- same values as tensor with shape shape. +-- +-- If one component of shape is the special value -1, the size of +-- that dimension is computed so that the total size remains constant. In +-- particular, a shape of `[-1]` flattens into 1-D. At most one +-- component of shape can be -1. +-- +-- If shape is 1-D or higher, then the operation returns a tensor +-- with shape shape filled with the values of tensor. In +-- this case, the number of elements implied by shape must be the +-- same as the number of elements in tensor. +-- +-- For example: +-- +-- ```prettyprint # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9] # +-- tensor t has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], +-- [4, 5, 6], [7, 8, 9]] +-- +-- # tensor t is [[[1, 1], [2, 2]], # [[3, 3], [4, 4]]] # tensor +-- t has shape [2, 2, 2] reshape(t, [2, 4]) ==> [[1, 1, 2, +-- 2], [3, 3, 4, 4]] +-- +-- # tensor t is [[[1, 1, 1], # [2, 2, 2]], # [[3, 3, 3], # [4, +-- 4, 4]], # [[5, 5, 5], # [6, 6, 6]]] # tensor t has shape [3, +-- 2, 3] # pass '[-1]' to flatten t reshape(t, [-1]) ==> [1, +-- 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] +-- +-- # -1 can also be used to infer the shape +-- +-- # -1 is inferred to be 9: reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, +-- 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] # -1 is inferred to be 2: +-- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5, +-- 5, 5, 6, 6, 6]] # -1 is inferred to be 3: reshape(t, [ 2, -1, 3]) +-- ==> [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, +-- 6, 6]]] +-- +-- # tensor t is [7] # shape `[]` reshapes to a scalar +-- reshape(t, []) ==> 7 ``` +reshape :: (TensorType t, TensorType tshape, OneOf '[Int32, Int64] tshape) => Tensor v1 t -> Tensor v2 tshape -> Tensor Value t + +-- | A queue that produces elements in first-in first-out order. +fIFOQueue :: Tensor Value ByteString + +-- | Generates labels for candidate sampling with a learned unigram +-- distribution. +-- +-- See explanations of candidate sampling and the data formats at +-- go/candidate-sampling. +-- +-- For each batch, this op picks a single set of sampled candidate +-- labels. +-- +-- The advantages of sampling candidates per-batch are simplicity and the +-- possibility of efficient dense matrix multiplication. 
The disadvantage +-- is that the sampled candidates must be chosen independently of the +-- context and of the true labels. +learnedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) + +-- | Performs fractional average pooling on the input. +-- +-- Fractional average pooling is similar to Fractional max pooling in the +-- pooling region generation step. The only difference is that after +-- pooling regions are generated, a mean operation is performed instead +-- of a max operation in each pooling region. +fractionalAvgPool :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value Int64, Tensor Value Int64) + +-- | Randomly crop image. +-- +-- size is a 1-D int64 tensor with 2 elements representing the +-- crop height and width. The values must be non negative. +-- +-- This Op picks a random location in image and crops a +-- height by width rectangle from that location. The +-- random location is picked so the cropped area will fit inside the +-- original image. +randomCrop :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int64 -> Tensor Value t + +-- | Cast x of type SrcT to y of DstT. +-- +-- _HostCast requires its input and produces its output in host memory. +_HostCast :: (TensorType dstT, TensorType srcT) => Tensor v1 srcT -> Tensor Value dstT + +-- | Closes the given queue. +-- +-- This operation signals that no more elements will be enqueued in the +-- given queue. Subsequent Enqueue(Many) operations will fail. Subsequent +-- Dequeue(Many) operations will continue to succeed if sufficient +-- elements remain in the queue. Subsequent Dequeue(Many) operations that +-- would block will fail immediately. +queueClose :: Tensor v1 ByteString -> ControlNode + +-- | Return a slice from input. +-- +-- The output tensor is a tensor with dimensions described by size +-- whose values are extracted from input starting at the offsets +-- in begin. +-- +--
+-- *Requirements*: 0 <= begin[i] <= begin[i] + size[i] <= Di
+-- for i in [0, n)
    +slice :: (TensorType index, OneOf '[Int32, Int64] index, TensorType t) => Tensor v1 t -> Tensor v2 index -> Tensor v3 index -> Tensor Value t + +-- | Returns the gradient of StridedSlice. +-- +-- Since StridedSlice cuts out pieces of its input +-- which is size shape, its gradient will have the same shape +-- (which is passed here as shape). The gradient will be zero in +-- any element that the slice does not select. +-- +-- Arguments are the same as StridedSliceGrad with the exception that +-- dy is the input gradient to be propagated and shape is +-- the shape of StridedSlice's input. +stridedSliceGrad :: (TensorType index, OneOf '[Int32, Int64] index, TensorType t) => Tensor v1 index -> Tensor v2 index -> Tensor v3 index -> Tensor v4 index -> Tensor v5 t -> Tensor Value t + +-- | Adds up a SparseTensor and a dense Tensor, producing a +-- dense Tensor. +-- +-- This Op does not require a_indices be sorted in standard +-- lexicographic order. +sparseTensorDenseAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 tindices -> Tensor v2 t -> Tensor v3 tindices -> Tensor v4 t -> Tensor Value t + +-- | Returns the size of a tensor. +-- +-- This operation returns an integer representing the number of elements +-- in input. +-- +-- For example: +-- +-- ```prettyprint # t is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], +-- [4, 4, 4]]]] size(t) ==> 12 ``` +size :: (TensorType t, TensorType out_type, OneOf '[Int32, Int64] out_type) => Tensor v1 t -> Tensor Value out_type + +-- | Defines a barrier that persists across different graph executions. +-- +-- A barrier represents a key-value map, where each key is a string, and +-- each value is a tuple of tensors. +-- +-- At runtime, the barrier contains complete and +-- incomplete elements. A complete element has defined tensors +-- for all components of its value tuple, and may be accessed using +-- BarrierTakeMany. An incomplete element has some undefined components +-- in its value tuple, and may be updated using BarrierInsertMany. +barrier :: Tensor Value ByteString + +-- | Computes the log of the absolute value of `Gamma(x)` element-wise. +lgamma :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Decode a JPEG-encoded image to a uint8 tensor. +-- +-- The attr channels indicates the desired number of color +-- channels for the decoded image. +-- +-- Accepted values are: +-- +--
+-- * 0: Use the number of channels in the JPEG-encoded image.
+--
+-- * 1: output a grayscale image.
+--
+-- * 3: output an RGB image.
    +-- +-- If needed, the JPEG-encoded image is transformed to match the +-- requested number of color channels. +-- +-- The attr ratio allows downscaling the image by an integer +-- factor during decoding. Allowed values are: 1, 2, 4, and 8. This is +-- much faster than downscaling the image later. +decodeJpeg :: Tensor v1 ByteString -> Tensor Value Word8 + +-- | Returns shape of tensors. +-- +-- This operation returns N 1-D integer tensors representing shape of +-- `input[i]s`. +shapeN :: (TensorType t, TensorType out_type, OneOf '[Int32, Int64] out_type) => [Tensor v1 t] -> [Tensor Value out_type] + +-- | Generates labels for candidate sampling with a uniform distribution. +-- +-- See explanations of candidate sampling and the data formats at +-- go/candidate-sampling. +-- +-- For each batch, this op picks a single set of sampled candidate +-- labels. +-- +-- The advantages of sampling candidates per-batch are simplicity and the +-- possibility of efficient dense matrix multiplication. The disadvantage +-- is that the sampled candidates must be chosen independently of the +-- context and of the true labels. +uniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) + +-- | Finds unique elements in a 1-D tensor. +-- +-- This operation returns a tensor y containing all of the +-- unique elements of x sorted in the same order that they occur +-- in x. This operation also returns a tensor idx the +-- same size as x that contains the index of each value of +-- x in the unique output y. In other words: +-- +-- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` +-- +-- For example: +-- +-- ```prettyprint # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, +-- idx = unique(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, +-- 3, 4, 4] ``` +unique :: (TensorType t, TensorType out_idx, OneOf '[Int32, Int64] out_idx) => Tensor v1 t -> (Tensor Value t, Tensor Value out_idx) + +-- | Draw bounding boxes on a batch of images. +-- +-- Outputs a copy of images but draws on top of the pixels zero +-- or more bounding boxes specified by the locations in boxes. +-- The coordinates of the each bounding box in boxes are encoded +-- as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are +-- floats in `[0.0, 1.0]` relative to the width and height of the +-- underlying image. +-- +-- For example, if an image is 100 x 200 pixels and the bounding box is +-- `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of +-- the bounding box will be `(10, 40)` to `(50, 180)`. +-- +-- Parts of the bounding box may fall outside the image. +drawBoundingBoxes :: (TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor v2 Float -> Tensor Value t + +-- | Split the data from the input value into TensorArray elements. +-- +-- Assuming that lengths takes on values +-- +-- ```(n0, n1, ..., n(T-1))``` +-- +-- and that value has shape +-- +-- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```, +-- +-- this splits values into a TensorArray with T tensors. +-- +-- TensorArray index t will be the subtensor of values with starting +-- position +-- +-- ```(n0 + n1 + ... + n(t-1), 0, 0, ...)``` +-- +-- and having size +-- +-- ```nt x d0 x d1 x ...``` +tensorArraySplit :: (TensorType t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Float -> Tensor Value Float + +-- | Splits a tensor into num_split tensors along one dimension. 
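[Editor's sketch, not part of the generated listing: the split entry (doc above, generated signature just below) is easy to see end to end. A minimal example, assuming TF.constant and TF.scalar from the companion tensorflow-ops package and runSession/run from TensorFlow.Session; mapM TF.run re-executes the graph once per output tensor, which is fine for illustration.]

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.Session as TF
import TensorFlow.Types (Shape (..))

-- Split a [2, 4] matrix into two [2, 2] halves along dimension 1.
main :: IO ()
main = TF.runSession $ do
    let x = TF.constant (Shape [2, 4]) [1 .. 8 :: Float]
        -- num_split = 2, split_dim = 1 (assumed argument order per the
        -- signature below: count, dimension tensor, value).
        halves = CoreOps.split 2 (TF.scalar (1 :: Int32)) x
    -- Each TF.run here triggers a separate graph execution; a sketch only.
    results <- mapM TF.run halves
    liftIO $ print (results :: [V.Vector Float])
```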
+split :: (TensorType t) => Int64 -> Tensor v1 Int32 -> Tensor v2 t -> [Tensor Value t] + +-- | Computes the maximum along segments of a tensor. +-- +-- Read the section on Segmentation for an explanation of +-- segments. +-- +-- Computes a tensor such that \(output_i = max_j(data_j)\) where +-- max is over j such that `segment_ids[j] == i`. +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/SegmentMax.png" alt /div +segmentMax :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor Value t + +-- | Raise a exception to abort the process when called. +-- +-- Returns nothing but an exception. +abort :: ControlNode + +-- | Reorders a SparseTensor into the canonical, row-major ordering. +-- +-- Note that by convention, all sparse ops preserve the canonical +-- ordering along increasing dimension number. The only time ordering can +-- be violated is during manual manipulation of the indices and values +-- vectors to add entries. +-- +-- Reordering does not affect the shape of the SparseTensor. +-- +-- If the tensor has rank R and N non-empty values, +-- input_indices has shape `[N, R]`, input_values has length +-- N, and input_shape has length R. +sparseReorder :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> (Tensor Value Int64, Tensor Value t) + +-- | Computes the gradient for the rsqrt of x wrt its input. +-- +-- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and +-- dy is the corresponding input gradient. +rsqrtGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Reverses variable length slices. +-- +-- This op first slices input along the dimension +-- batch_dim, and for each slice i, reverses the first +-- `seq_lengths[i]` elements along the dimension seq_dim. +-- +-- The elements of seq_lengths must obey `seq_lengths[i] < +-- input.dims[seq_dim]`, and seq_lengths must be a vector of +-- length `input.dims[batch_dim]`. +-- +-- The output slice i along dimension batch_dim is then +-- given by input slice i, with the first `seq_lengths[i]` +-- slices along dimension seq_dim reversed. +-- +-- For example: +-- +-- ```prettyprint # Given this: batch_dim = 0 seq_dim = 1 input.dims = +-- (4, 8, ...) seq_lengths = [7, 2, 3, 5] +-- +-- # then slices of input are reversed on seq_dim, but only up to +-- seq_lengths: output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] +-- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] output[2, 0:3, :, +-- ...] = input[2, 3:0:-1, :, ...] output[3, 0:5, :, ...] = input[3, +-- 5:0:-1, :, ...] +-- +-- # while entries past seq_lens are copied through: output[0, 7:, :, +-- ...] = input[0, 7:, :, ...] output[1, 2:, :, ...] = input[1, 2:, :, +-- ...] output[2, 3:, :, ...] = input[2, 3:, :, ...] output[3, 2:, :, +-- ...] = input[3, 2:, :, ...] ``` +-- +-- In contrast, if: +-- +-- ```prettyprint # Given this: batch_dim = 2 seq_dim = 0 input.dims = +-- (8, ?, 4, ...) seq_lengths = [7, 2, 3, 5] +-- +-- # then slices of input are reversed on seq_dim, but only up to +-- seq_lengths: output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] +-- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] output[0:3, :, +-- 2, :, ...] = input[3:0:-1, :, 2, :, ...] output[0:5, :, 3, :, ...] = +-- input[5:0:-1, :, 3, :, ...] 
+-- +-- # while entries past seq_lens are copied through: output[7:, :, 0, :, +-- ...] = input[7:, :, 0, :, ...] output[2:, :, 1, :, ...] = input[2:, :, +-- 1, :, ...] output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] +-- output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] ``` +reverseSequence :: (TensorType t, TensorType tlen, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v1 t -> Tensor v2 tlen -> Tensor Value t + +-- | Returns the number of records this Reader has produced. +-- +-- This is the same as the number of ReaderRead executions that have +-- succeeded. +readerNumRecordsProduced :: Tensor v1 ByteString -> Tensor Value Int64 + +-- | Deserialize and concatenate SparseTensors from a serialized +-- minibatch. +-- +-- The input serialized_sparse must be a string matrix of shape +-- `[N x 3]` where N is the minibatch size and the rows +-- correspond to packed outputs of SerializeSparse. The ranks of +-- the original SparseTensor objects must all match. When the +-- final SparseTensor is created, it has rank one higher than +-- the ranks of the incoming SparseTensor objects (they have +-- been concatenated along a new row dimension). +-- +-- The output SparseTensor object's shape values for all +-- dimensions but the first are the max across the input +-- SparseTensor objects' shape values for the corresponding +-- dimensions. Its first shape value is N, the minibatch size. +-- +-- The input SparseTensor objects' indices are assumed ordered +-- in standard lexicographic order. If this is not the case, after this +-- step run SparseReorder to restore index ordering. +-- +-- For example, if the serialized input is a `[2 x 3]` matrix +-- representing two original SparseTensor objects: +-- +-- index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] +-- +-- and +-- +-- index = [ 2] [10] values = [4, 5] shape = [30] +-- +-- then the final deserialized SparseTensor will be: +-- +-- index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] +-- shape = [2 50] +deserializeManySparse :: (TensorType dtype) => Tensor v1 ByteString -> (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64) + +-- | Returns immutable tensor from memory region. +-- +-- The current implementation memmaps the tensor from a file. +immutableConst :: (TensorType dtype) => Tensor Value dtype + +-- | Returns the min of x and y (i.e. x < y ? x : y) element-wise. +-- +--
+-- *NOTE*: Minimum supports broadcasting. More about
+-- broadcasting here
    +minimum :: (TensorType t, OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Initializes a table from a text file. +-- +-- It inserts one key-value pair into the table for each line of the +-- file. The key and value is extracted from the whole line content, +-- elements from the split line based on delimiter or the line +-- number (starting from zero). Where to extract the key and value from a +-- line is specified by key_index and value_index. +-- +--
+-- * A value of -1 means use the line number (starting from zero),
+-- expects int64.
+--
+-- * A value of -2 means use the whole line content, expects
+-- string.
+--
+-- * A value >= 0 means use the index (starting at zero) of the
+-- split line based on delimiter.
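[Editor's sketch: how the two index attributes in the list above map onto a call (the signature follows). The table handle and filename tensors are hypothetical inputs produced elsewhere, e.g. by a hash-table op not shown here; module locations (TensorFlow.Output, TensorFlow.Tensor) and the attribute order key_index-then-value_index are assumptions.]

```haskell
import Data.ByteString (ByteString)
import qualified TensorFlow.GenOps.Core as CoreOps
import TensorFlow.Output (ControlNode)
import TensorFlow.Tensor (Tensor)

-- Key taken from column 0 of each line; value is the line number (-1).
initFromFile :: Tensor v1 ByteString  -- ^ table handle (hypothetical)
             -> Tensor v2 ByteString  -- ^ filename tensor (hypothetical)
             -> ControlNode
initFromFile tableHandle filename =
    CoreOps.initializeTableFromTextFile 0 (-1) tableHandle filename
```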
    +initializeTableFromTextFile :: Int64 -> Int64 -> Tensor v1 ByteString -> Tensor v2 ByteString -> ControlNode + +-- | Returns the diagonal part of the tensor. +-- +-- This operation returns a tensor with the diagonal part of the +-- input. The diagonal part is computed as follows: +-- +-- Assume input has dimensions `[D1,..., Dk, D1,..., Dk]`, then +-- the output is a tensor of rank k with dimensions `[D1,..., +-- Dk]` where: +-- +-- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. +-- +-- For example: +-- +-- ```prettyprint # input is [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, +-- 3, 0] [0, 0, 0, 4]] +-- +-- tf.diag_part(input) ==> [1, 2, 3, 4] ``` +diagPart :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Computes natural logarithm of x element-wise. +-- +-- I.e., \(y = log_e x\). +log :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Scatter the data from the input value into specific TensorArray +-- elements. +-- +-- indices must be a vector, its length must match the first dim +-- of value. +tensorArrayScatter :: (TensorType t) => Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor v3 t -> Tensor v4 Float -> Tensor Value Float + +-- | Returns the rank of a tensor. +-- +-- This operation returns an integer representing the rank of +-- input. +-- +-- For example: +-- +-- ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], +-- [4, 4, 4]]] # shape of tensor t is [2, 2, 3] rank(t) ==> 3 +-- ``` +-- +--
+-- *Note*: The rank of a tensor is not the same as the rank of a
+-- matrix. The rank of a tensor is the number of indices required to
+-- uniquely select each element of the tensor. Rank is also known as
+-- "order", "degree", or "ndims."
    +rank :: (TensorType t) => Tensor v1 t -> Tensor Value Int32 + +-- | Return a tensor with the same shape and contents as the input tensor +-- or value. +identity :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Adjust the contrast of one or more images. +-- +-- images is a tensor of at least 3 dimensions. The last 3 +-- dimensions are interpreted as `[height, width, channels]`. The other +-- dimensions only represent a collection of images, such as `[batch, +-- height, width, channels].` +-- +-- Contrast is adjusted independently for each channel of each image. +-- +-- For each channel, the Op first computes the mean of the image pixels +-- in the channel and then adjusts each component of each pixel to `(x - +-- mean) * contrast_factor + mean`. +adjustContrastv2 :: Tensor v1 Float -> Tensor v2 Float -> Tensor Value Float + +-- | Sparse update entries in '*var' and '*accum' according to FOBOS +-- algorithm. +-- +-- That is for rows we have grad for, we update var and accum as follows: +-- accum += grad * grad prox_v = var prox_v -= lr * grad * (1 / +-- sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} +sparseApplyProximalAdagrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 tindices -> Tensor Value t + +-- | Gather slices from params according to indices. +-- +-- indices must be an integer tensor of any dimension (usually +-- 0-D or 1-D). Produces an output tensor with shape `indices.shape + +-- params.shape[1:]` where: +-- +-- # Scalar indices output[:, ..., :] = params[indices, :, ... :] +-- +-- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :] +-- +-- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, +-- ..., j], :, ..., :] +-- +-- If indices is a permutation and `len(indices) == +-- params.shape[0]` then this operation will permute params +-- accordingly. +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/Gather.png" alt /div +gather :: (TensorType tindices, OneOf '[Int32, Int64] tindices, TensorType tparams) => Tensor v1 tparams -> Tensor v2 tindices -> Tensor Value tparams + +-- | Checks whether a tensor has been initialized. +-- +-- Outputs boolean scalar indicating whether the tensor has been +-- initialized. +isVariableInitialized :: (TensorType dtype) => Tensor v1 dtype -> Tensor Value Bool + +-- | Concatenates tensors along one dimension. +concat :: (TensorType t) => Tensor v1 Int32 -> [Tensor v2 t] -> Tensor Value t + +-- | Outputs random integers from a uniform distribution. +-- +-- The generated values are uniform integers in the range `[minval, +-- maxval)`. The lower bound minval is included in the range, +-- while the upper bound maxval is excluded. +-- +-- The random integers are slightly biased unless `maxval - minval` is an +-- exact power of two. The bias is small for values of `maxval - minval` +-- significantly smaller than the range of the output (either `2^32` or +-- `2^64`). +randomUniformInt :: (TensorType t, OneOf '[Int32, Int64] t, TensorType tout, OneOf '[Int32, Int64] tout) => Tensor v1 t -> Tensor v2 tout -> Tensor v3 tout -> Tensor Value tout + +-- | Stops gradient computation. +-- +-- When executed in a graph, this op outputs its input tensor as-is. 
+--
+-- When building ops to compute gradients, this op prevents the
+-- contribution of its inputs from being taken into account. Normally, the
+-- gradient generator adds ops to a graph to compute the derivatives of a
+-- specified loss by recursively finding out inputs that
+-- contributed to its computation. If you insert this op in the graph, its
+-- inputs are masked from the gradient generator. They are not taken into
+-- account for computing gradients.
+--
+-- This is useful any time you want to compute a value with TensorFlow
+-- but need to pretend that the value was a constant. Some examples
+-- include:
+--
+-- * The *EM* algorithm where the *M-step* should not involve
+-- backpropagation through the output of the *E-step*.
+--
+-- * Contrastive divergence training of Boltzmann machines where, when
+-- differentiating the energy function, the training must not
+-- backpropagate through the graph that generated the samples from the
+-- model.
+--
+-- * Adversarial training, where no backprop should happen through the
+-- adversarial example generation process.
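[Editor's sketch of the freezing pattern in the list above; the generated signature follows. Gradient machinery such as TensorFlow.Gradient lives in the companion tensorflow-ops package, so anything beyond the wrapper itself is an assumption.]

```haskell
import qualified TensorFlow.GenOps.Core as CoreOps
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (TensorType)

-- Downstream ops see the same values, but no gradient flows into the
-- wrapped subgraph (e.g. the E-step output in an EM setup).
freeze :: TensorType t => Tensor v t -> Tensor Value t
freeze = CoreOps.stopGradient
```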
    +stopGradient :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Performs average pooling on the input. +-- +-- Each entry in output is the mean of the corresponding size +-- ksize window in value. +avgPool :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | A Reader that outputs the entire contents of a file as a value. +-- +-- To use, enqueue filenames in a Queue. The output of ReaderRead will be +-- a filename (key) and the contents of that file (value). +wholeFileReader :: Tensor Value ByteString + +-- | Forwards `data` to the output port determined by pred. +-- +-- If pred is true, the `data` input is forwarded to +-- output_true. Otherwise, the data goes to +-- output_false. +-- +-- See also RefSwitch and Merge. +switch :: (TensorType t) => Tensor v1 t -> Tensor v2 Bool -> (Tensor Value t, Tensor Value t) + +-- | Outputs random values from a normal distribution. +-- +-- The generated values will have mean 0 and standard deviation 1. +randomStandardNormal :: (TensorType t, OneOf '[Int32, Int64] t, TensorType dtype, OneOf '[Word16, Double, Float] dtype) => Tensor v1 t -> Tensor Value dtype + +-- | Computes sigmoid of x element-wise. +-- +-- Specifically, `y = 1 / (1 + exp(-x))`. +sigmoid :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Generate a single randomly distorted bounding box for an image. +-- +-- Bounding box annotations are often supplied in addition to +-- ground-truth labels in image recognition or object localization tasks. +-- A common technique for training such a system is to randomly distort +-- an image while preserving its content, i.e. *data augmentation*. This +-- Op outputs a randomly distorted localization of an object, i.e. +-- bounding box, given an image_size, bounding_boxes +-- and a series of constraints. +-- +-- The output of this Op is a single bounding box that may be used to +-- crop the original image. The output is returned as 3 tensors: +-- begin, size and bboxes. The first 2 tensors +-- can be fed directly into `tf.slice` to crop the image. The latter may +-- be supplied to `tf.image.draw_bounding_box` to visualize what the +-- bounding box looks like. +-- +-- Bounding boxes are supplied and returned as `[y_min, x_min, y_max, +-- x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` +-- relative to the width and height of the underlying image. +-- +-- For example, +-- +-- # Generate a single distorted bounding box. begin, size, bbox_for_draw +-- = tf.image.sample_distorted_bounding_box( tf.shape(image), +-- bounding_boxes=bounding_boxes) +-- +-- # Draw the bounding box in an image summary. image_with_box = +-- tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) +-- tf.image_summary(images_with_box, image_with_box) +-- +-- # Employ the bounding box to distort the image. distorted_image = +-- tf.slice(image, begin, size) +-- +-- Note that if no bounding box information is available, setting +-- `use_image_if_no_bounding_boxes = true` will assume there is a single +-- implicit bounding box covering the whole image. If +-- use_image_if_no_bounding_boxes is false and no bounding boxes +-- are supplied, an error is raised. +sampleDistortedBoundingBox :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word8] t) => Tensor v1 t -> Tensor v2 Float -> (Tensor Value t, Tensor Value t, Tensor Value Float) + +-- | Returns the truth value of (x > y) element-wise. +-- +--
+-- *NOTE*: Greater supports broadcasting. More about
+-- broadcasting here
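[Editor's sketch of the broadcasting note above; the signature follows. TF.constant/TF.scalar and the Session type are assumed from the companion tensorflow-ops and tensorflow packages.]

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.Session as TF
import TensorFlow.Types (Shape (..))

-- Broadcasting: compare a [3] vector against a scalar threshold.
-- Under TF.runSession this evaluates to [False, True, True].
mask :: TF.Session (V.Vector Bool)
mask = TF.run $ CoreOps.greater
    (TF.constant (Shape [3]) [1, 5, 9 :: Float])
    (TF.scalar (2 :: Float))
```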
    +greater :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool + +-- | Makes its input available to the next iteration. +refNextIteration :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | SpaceToDepth for tensors of type T. +-- +-- Rearranges blocks of spatial data, into depth. More specifically, this +-- op outputs a copy of the input tensor where values from the +-- height and width dimensions are moved to the +-- depth dimension. The attr block_size indicates the +-- input block size and how the data is moved. +-- +--
+-- * Non-overlapping blocks of size `block_size x block_size` are
+-- rearranged into depth at each location.
+--
+-- * The depth of the output tensor is `input_depth * block_size *
+-- block_size`.
+--
+-- * The input tensor's height and width must be divisible by
+-- block_size.
    +-- +-- That is, assuming the input is in the shape: `[batch, height, width, +-- depth]`, the shape of the output will be: `[batch, +-- heightblock_size, widthblock_size, +-- depth*block_size*block_size]` +-- +-- This operation requires that the input tensor be of rank 4, and that +-- block_size be >=1 and a divisor of both the input +-- height and width. +-- +-- This operation is useful for resizing the activations between +-- convolutions (but keeping all data), e.g. instead of pooling. It is +-- also useful for training purely convolutional models. +-- +-- For example, given this input of shape `[1, 2, 2, 1]`, and block_size +-- of 2: +-- +-- ```prettyprint x = [[[[1], [2]], [[3], [4]]]] ``` +-- +-- This operation will output a tensor of shape `[1, 1, 1, 4]`: +-- +-- ```prettyprint [[[[1, 2, 3, 4]]]] ``` +-- +-- Here, the input has a batch of 1 and each batch element has shape `[2, +-- 2, 1]`, the corresponding output will have a single element (i.e. +-- width and height are both 1) and will have a depth of 4 channels (1 * +-- block_size * block_size). The output element shape is `[1, 1, 4]`. +-- +-- For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, +-- e.g. +-- +-- ```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, +-- 12]]]] ``` +-- +-- This operation, for block_size of 2, will return the following tensor +-- of shape `[1, 1, 1, 12]` +-- +-- ```prettyprint [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ``` +-- +-- Similarly, for the following input of shape `[1 4 4 1]`, and a block +-- size of 2: +-- +-- ```prettyprint x = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]], [[9], +-- [10], [13], [14]], [[11], [12], [15], [16]]]] ``` +-- +-- the operator will return the following tensor of shape `[1 2 2 4]`: +-- +-- ```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], +-- [13, 14, 15, 16]]]] ``` +spaceToDepth :: (TensorType t) => Int64 -> Tensor v1 t -> Tensor Value t + +-- | Does nothing. Serves as a control trigger for scheduling. +-- +-- Only useful as a placeholder for control edges. +controlTrigger :: ControlNode + +-- | Divides a variable reference by sparse updates. +-- +-- This operation computes +-- +-- # Scalar indices ref[indices, ...] /= updates[...] +-- +-- # Vector indices (for each i) ref[indices[i], ...] /= updates[i, ...] +-- +-- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] +-- /= updates[i, ..., j, ...] +-- +-- This operation outputs ref after the update is done. This +-- makes it easier to chain operations that need to use the reset value. +-- +-- Duplicate entries are handled correctly: if multiple indices +-- reference the same location, their contributions divide. +-- +-- Requires `updates.shape = indices.shape + ref.shape[1:]`. +scatterDiv :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor v3 t -> Tensor Value t + +-- | Copy Op. +-- +-- Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on +-- the device on which the tensor is allocated. +-- +-- Unlike the CopyHost Op, this op does not have HostMemory constraint on +-- its input or output. +copy :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Computes the gradient of the crop_and_resize op wrt the input boxes +-- tensor. 
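[Editor's sketch tying the spaceToDepth walkthrough above to code; TF.constant from the companion tensorflow-ops package is an assumption.]

```haskell
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))

-- The [1, 2, 2, 1] example above with block_size = 2:
-- the result has shape [1, 1, 1, 4] and values [[[[1, 2, 3, 4]]]].
x, y :: Tensor Value Float
x = TF.constant (Shape [1, 2, 2, 1]) [1, 2, 3, 4]
y = CoreOps.spaceToDepth 2 x
```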
+cropAndResizeGradBoxes :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Float -> Tensor v2 t -> Tensor v3 Float -> Tensor v4 Int32 -> Tensor Value Float + +-- | Computes the mean along sparse segments of a tensor. +-- +-- Read the section on Segmentation for an explanation of +-- segments. +-- +-- Like SegmentMean, but segment_ids can have rank less +-- than `data`'s first dimension, selecting a subset of dimension 0, +-- specified by indices. +sparseSegmentMean :: (TensorType t, OneOf '[Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor v3 Int32 -> Tensor Value t + +-- | Update ref by assigning value to it. +-- +-- This operation outputs "ref" after the assignment is done. This makes +-- it easier to chain operations that need to use the reset value. +assign :: (TensorType t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Restores a tensor from checkpoint files. +-- +-- Reads a tensor stored in one or several files. If there are several +-- files (for instance because a tensor was saved as slices), +-- file_pattern may contain wildcard symbols (* and +-- ?) in the filename portion only, not in the directory +-- portion. +-- +-- If a file_pattern matches several files, +-- preferred_shard can be used to hint in which file the +-- requested tensor is likely to be found. This op will first open the +-- file at index preferred_shard in the list of matching files +-- and try to restore tensors from that file. Only if some tensors or +-- tensor slices are not found in that first file, then the Op opens all +-- the files. Setting preferred_shard to match the value passed +-- as the shard input of a matching Save Op may speed +-- up Restore. This attribute only affects performance, not correctness. +-- The default value -1 means files are processed in order. +-- +-- See also RestoreSlice. +restore :: (TensorType dt) => Tensor v1 ByteString -> Tensor v2 ByteString -> Tensor Value dt + +-- | Computes gradients of the maxpooling function. +maxPoolGradWithArgmax :: (TensorType t, OneOf '[Word16, Float] t, TensorType targmax, OneOf '[Int32, Int64] targmax) => Tensor v1 t -> Tensor v2 t -> Tensor v3 targmax -> Tensor Value t + +-- | Checks a tensor for NaN and Inf values. +-- +-- When run, reports an InvalidArgument error if tensor +-- has any values that are not a number (NaN) or infinity (Inf). +-- Otherwise, passes tensor as-is. +checkNumerics :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Returns a tensor of zeros with the same shape and type as x. +zerosLike :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Reads and outputs the entire contents of the input filename. +readFile :: Tensor v1 ByteString -> Tensor Value ByteString + +-- | Shuffle dimensions of x according to a permutation. +-- +-- The output y has the same rank as x. The shapes of +-- x and y satisfy: `y.shape[i] == x.shape[perm[i]] for +-- i in [0, 1, ..., rank(x) - 1]` +transpose :: (TensorType t, TensorType tperm, OneOf '[Int32, Int64] tperm) => Tensor v1 t -> Tensor v2 tperm -> Tensor Value t + +-- | Transforms a serialized tensorflow.TensorProto proto into a Tensor. +parseTensor :: (TensorType out_type) => Tensor v1 ByteString -> Tensor Value out_type + +-- | Computes acos of x element-wise. 
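[Editor's sketch for the transpose entry above, which states `y.shape[i] == x.shape[perm[i]]`; TF.constant from the companion tensorflow-ops package is an assumption.]

```haskell
import Data.Int (Int32)
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))

-- perm = [1, 0] swaps the two axes: a [2, 3] matrix becomes [3, 2].
transposed :: Tensor v Float -> Tensor Value Float
transposed x = CoreOps.transpose x (TF.constant (Shape [2]) [1, 0 :: Int32])
```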
+acos :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Bitcasts a tensor from one type to another without copying data. +-- +-- Given a tensor input, this operation returns a tensor that +-- has the same buffer data as input with datatype `type`. +-- +-- If the input datatype T is larger than the output datatype +-- `type` then the shape changes from [...] to [..., +-- sizeof(T)/sizeof(`type`)]. +-- +-- If T is smaller than `type`, the operator requires that the +-- rightmost dimension be equal to sizeof(`type`)/sizeof(T). The +-- shape then goes from [..., sizeof(`type`)/sizeof(T)] to +-- [...]. +-- +--
+-- *NOTE*: Bitcast is implemented as a low-level cast, so machines
+-- with different endian orderings will give different results.
    +bitcast :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType type', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] type') => Tensor v1 t -> Tensor Value type' + +-- | Replaces the contents of the table with the specified keys and values. +-- +-- The tensor keys must be of the same type as the keys of the +-- table. The tensor values must be of the type of the table +-- values. +lookupTableImport :: (TensorType tin, TensorType tout) => Tensor v1 ByteString -> Tensor v2 tin -> Tensor v3 tout -> ControlNode + +-- | The backward operation for BiasAdd on the "bias" tensor. +-- +-- It accumulates all the values from out_backprop into the feature +-- dimension. For NHWC data format, the feature dimension is the last. +-- For NCHW data format, the feature dimension is the third-to-last. +biasAddGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t +batchSelfAdjointEig :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Computes the product of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +prod :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t + +-- | Resize images to size using bilinear interpolation. +-- +-- Input images can be of different types but output images are always +-- float. +resizeBilinear :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value Float + +-- | Unpack the data from the input value into TensorArray elements. +-- +--
+-- *WARNING: This op is deprecated.*
    +-- +-- Instead of this op, use TensorArrayScatter with `indices = +-- RangeOp(0, SizeOp(value)[0])`. +tensorArrayUnpack :: (TensorType t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor v3 Float -> Tensor Value Float +batchMatrixDeterminant :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Computes the sum of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +sum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t + +-- | Compute the inverse 2-dimensional discrete Fourier Transform over the +-- inner-most +-- +-- 2 dimensions of input. +iFFT2D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) + +-- | Creates a tensor filled with a scalar value. +-- +-- This operation creates a tensor of shape dims and fills it +-- with value. +-- +-- For example: +-- +-- ```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9) +-- ==> [[9, 9, 9] [9, 9, 9]] ``` +fill :: (TensorType t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor Value t + +-- | Generates labels for candidate sampling with a learned unigram +-- distribution. +-- +-- A unigram sampler could use a fixed unigram distribution read from a +-- file or passed in as an in-memory array instead of building up the +-- distribution from data on the fly. There is also an option to skew the +-- distribution by applying a distortion power to the weights. +-- +-- The vocabulary file should be in CSV-like format, with the last field +-- being the weight associated with the word. +-- +-- For each batch, this op picks a single set of sampled candidate +-- labels. +-- +-- The advantages of sampling candidates per-batch are simplicity and the +-- possibility of efficient dense matrix multiplication. The disadvantage +-- is that the sampled candidates must be chosen independently of the +-- context and of the true labels. +fixedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) + +-- | Computes the grayscale dilation of 4-D input and 3-D +-- filter tensors. +-- +-- The input tensor has shape `[batch, in_height, in_width, +-- depth]` and the filter tensor has shape `[filter_height, +-- filter_width, depth]`, i.e., each input channel is processed +-- independently of the others with its own structuring function. The +-- output tensor has shape `[batch, out_height, out_width, +-- depth]`. The spatial dimensions of the output tensor depend on the +-- padding algorithm. We currently only support the default +-- NHWC data_format. +-- +-- In detail, the grayscale morphological 2-D dilation is the max-sum +-- correlation (for consistency with conv2d, we use unmirrored +-- filters): +-- +-- output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * +-- dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c] +-- +-- Max-pooling is a special case when the filter has size equal to the +-- pooling kernel size and contains all zeros. +-- +-- Note on duality: The dilation of input by the filter +-- is equal to the negation of the erosion of `-input` by the reflected +-- filter. 
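[Editor's sketch for the fill entry above, reproducing its `fill([2, 3], 9)` example; TF.constant/TF.scalar from the companion tensorflow-ops package are assumptions.]

```haskell
import Data.Int (Int32)
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))

-- fill([2, 3], 9) ==> [[9, 9, 9], [9, 9, 9]]
nines :: Tensor Value Float
nines = CoreOps.fill (TF.constant (Shape [2]) [2, 3 :: Int32])
                     (TF.scalar (9 :: Float))
```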
+dilation2D :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Compute the polygamma function \(psi^{(n)}(x)\). +-- +-- The polygamma function is defined as: +-- +-- ``` psi^{(n)}(x) = frac{d^n}{dx^n} psi(x) ``` where \(psi(x)\) is the +-- digamma function. +polygamma :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Return the same ref tensor as the input ref tensor. +refIdentity :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | PNG-encode an image. +-- +-- image is a 3-D uint8 or uint16 Tensor of shape `[height, +-- width, channels]` where channels is: +-- +--
+-- * 1: for grayscale.
+--
+-- * 2: for grayscale + alpha.
+--
+-- * 3: for RGB.
+--
+-- * 4: for RGBA.
    +-- +-- The ZLIB compression level, compression, can be -1 for the +-- PNG-encoder default or a value from 0 to 9. 9 is the highest +-- compression level, generating the smallest output, but is slower. +encodePng :: (TensorType t, OneOf '[Word16, Word8] t) => Tensor v1 t -> Tensor Value ByteString + +-- | Updates the table to associates keys with values. +-- +-- The tensor keys must be of the same type as the keys of the +-- table. The tensor values must be of the type of the table +-- values. +lookupTableInsert :: (TensorType tin, TensorType tout) => Tensor v1 ByteString -> Tensor v2 tin -> Tensor v3 tout -> ControlNode +batchIFFT2D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) + +-- | Finds unique elements in a 1-D tensor. +-- +-- This operation returns a tensor y containing all of the +-- unique elements of x sorted in the same order that they occur +-- in x. This operation also returns a tensor idx the +-- same size as x that contains the index of each value of +-- x in the unique output y. Finally, it returns a +-- third tensor count that contains the count of each element of +-- y in x. In other words: +-- +-- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` +-- +-- For example: +-- +-- ```prettyprint # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, +-- idx, count = unique_with_counts(x) y ==> [1, 2, 4, 7, 8] idx ==> +-- [0, 0, 1, 2, 2, 2, 3, 4, 4] count ==> [2, 1, 3, 1, 2] ``` +uniqueWithCounts :: (TensorType t, TensorType out_idx, OneOf '[Int32, Int64] out_idx) => Tensor v1 t -> (Tensor Value t, Tensor Value out_idx, Tensor Value out_idx) + +-- | Gather values or slices from params according to +-- indices. +-- +-- params is a Tensor of rank R and indices is +-- a Tensor of rank M. +-- +-- indices must be integer tensor, containing indices into +-- params. It must be shape `[d_0, ..., d_N, R]` where `0 < R +-- <= M`. +-- +-- The innermost dimension of indices (with length R) +-- corresponds to indices into elements (if `R = M`) or slices (if `R +-- < M`) along the Nth dimension of params. +-- +-- Produces an output tensor with shape +-- +--
+-- `[d_0, ..., d_{n-1}, params.shape[R], ..., params.shape[M-1]]`.
    +-- +-- Some examples below. +-- +-- Simple indexing into a matrix: +-- +-- indices = [[0, 0], [1, 1]] params = [[a, b], +-- [c, d]] output = [a, d] +-- +-- Slice indexing into a matrix: +-- +-- indices = [[1], [0]] params = [[a, b], [c, +-- d]] output = [[c, d], [a, +-- b]] +-- +-- Indexing into a 3-tensor: +-- +-- indices = [[1]] params = [[[a0, b0], [c0, +-- d0]], [[a1, b1], [c1, +-- d1]]] output = [[[a1, b1], [c1, +-- d1]]] +-- +-- indices = [[0, 1], [1, 0]] params = [[[a0, b0], +-- [c0, d0]], [[a1, b1], +-- [c1, d1]]] output = [[c0, d0], +-- [a1, b1]] +-- +-- indices = [[0, 0, 1], [1, 0, 1]] params = [[[a0, +-- b0], [c0, d0]], [[a1, +-- b1], [c1, d1]]] output = [b0, +-- b1] +-- +-- Batched indexing into a matrix: +-- +-- indices = [[[0, 0]], [[0, 1]]] params = [[a, b], +-- [c, d]] output = [[a], [b]] +-- +-- Batched slice indexing into a matrix: +-- +-- indices = [[[1]], [[0]]] params = [[a, b], +-- [c, d]] output = [[[c, d]], +-- [[a, b]]] +-- +-- Batched indexing into a 3-tensor: +-- +-- indices = [[[1]], [[0]]] params = [[[a0, b0], +-- [c0, d0]], [[a1, b1], +-- [c1, d1]]] output = [[[[a1, b1], +-- [c1, d1]]], [[[a0, b0], +-- [c0, d0]]]] +-- +-- indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] params = +-- [[[a0, b0], [c0, d0]], +-- [[a1, b1], [c1, d1]]] output = +-- [[[c0, d0], [a1, b1]], +-- [[a0, b0], [c1, d1]]] +-- +-- indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] params = +-- [[[a0, b0], [c0, d0]], +-- [[a1, b1], [c1, d1]]] output = +-- [[b0, b1], [d0, c1]] +gatherNd :: (TensorType tindices, OneOf '[Int32, Int64] tindices, TensorType tparams) => Tensor v1 tparams -> Tensor v2 tindices -> Tensor Value tparams + +-- | Read an element from the TensorArray into output value. +tensorArrayRead :: (TensorType dtype) => Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor v3 Float -> Tensor Value dtype + +-- | Returns up to num_records (key, value) pairs produced by a +-- Reader. +-- +-- Will dequeue from the input queue if necessary (e.g. when the Reader +-- needs to start reading from a new file since it has finished with the +-- previous file). It may return less than num_records even +-- before the last batch. +readerReadUpTo :: Tensor v1 ByteString -> Tensor v2 ByteString -> Tensor v3 Int64 -> (Tensor Value ByteString, Tensor Value ByteString) + +-- | Compute the regularized incomplete beta integral \(I_x(a, b)\). +-- +-- The regularized incomplete beta integral is defined as: +-- +-- ``` I_x(a, b) = frac{B(x; a, b)}{B(a, b)} ``` where +-- +-- ``` B(x; a, b) = int_0^x t^{a-1} (1 - t)^{b-1} dt ``` +-- +-- is the incomplete beta function and \(B(a, b)\) is the *complete* beta +-- function. +betainc :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t +batchMatrixBandPart :: (TensorType t) => Tensor v1 t -> Tensor v2 Int64 -> Tensor v3 Int64 -> Tensor Value t + +-- | Computes the gradients of depthwise convolution with respect to the +-- input. +depthwiseConv2dNativeBackpropInput :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor v3 t -> Tensor Value t + +-- | Forwards the indexth element of inputs to +-- output. +refSelect :: (TensorType t) => Tensor v1 Int32 -> [Tensor v2 t] -> Tensor Value t + +-- | Exits the current frame to its parent frame. +-- +-- Exit makes its input `data` available to the parent frame. +exit :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Looks up keys in a table, outputs the corresponding values. 
+-- +-- The tensor keys must of the same type as the keys of the +-- table. The output values is of the type of the table values. +-- +-- The scalar default_value is the value output for keys not +-- present in the table. It must also be of the same type as the table +-- values. +lookupTableFind :: (TensorType tin, TensorType tout) => Tensor v1 ByteString -> Tensor v2 tin -> Tensor v3 tout -> Tensor Value tout + +-- | Removes dimensions of size 1 from the shape of a tensor. +-- +-- Given a tensor input, this operation returns a tensor of the +-- same type with all dimensions of size 1 removed. If you don't want to +-- remove all size 1 dimensions, you can remove specific size 1 +-- dimensions by specifying squeeze_dims. +-- +-- For example: +-- +-- ```prettyprint # t is a tensor of shape [1, 2, 1, 3, 1, 1] +-- shape(squeeze(t)) ==> [2, 3] ``` +-- +-- Or, to remove specific size 1 dimensions: +-- +-- ```prettyprint # t is a tensor of shape [1, 2, 1, 3, 1, 1] +-- shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] ``` +squeeze :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Computes the mean of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +mean :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t + +-- | SpaceToBatch for N-D tensors of type T. +-- +-- This operation divides "spatial" dimensions `[1, ..., M]` of the input +-- into a grid of blocks of shape block_shape, and interleaves +-- these blocks with the "batch" dimension (0) such that in the output, +-- the spatial dimensions `[1, ..., M]` correspond to the position within +-- the grid, and the batch dimension combines both the position within a +-- spatial block and the original batch position. Prior to division into +-- blocks, the spatial dimensions of the input are optionally zero padded +-- according to paddings. See below for a precise description. +spaceToBatchND :: (TensorType t, TensorType tblock_shape, OneOf '[Int32, Int64] tblock_shape, TensorType tpaddings, OneOf '[Int32, Int64] tpaddings) => Tensor v1 t -> Tensor v2 tblock_shape -> Tensor v3 tpaddings -> Tensor Value t + +-- | SpaceToBatch for 4-D tensors of type T. +-- +-- This is a legacy version of the more general SpaceToBatchND. +-- +-- Zero-pads and then rearranges (permutes) blocks of spatial data into +-- batch. More specifically, this op outputs a copy of the input tensor +-- where values from the height and width dimensions +-- are moved to the batch dimension. After the zero-padding, +-- both height and width of the input must be divisible +-- by the block size. +spaceToBatch :: (TensorType t, TensorType tpaddings, OneOf '[Int32, Int64] tpaddings) => Int64 -> Tensor v1 t -> Tensor v2 tpaddings -> Tensor Value t + +-- | Performs greedy decoding on the logits given in inputs. +-- +-- A note about the attribute merge_repeated: if enabled, when +-- consecutive logits' maximum indices are the same, only the first of +-- these is emitted. Labeling the blank *, the sequence "A B B * B +-- B" becomes "A B" if merge_repeated = True and "A B B B B" if +-- merge_repeated = False. 
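[Editor's sketch, stepping back to the squeeze entry above: called without squeeze_dims it removes every size-1 dimension, matching the first example there.]

```haskell
import qualified TensorFlow.GenOps.Core as CoreOps
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (TensorType)

-- For an input of shape [1, 2, 1, 3, 1, 1] the result has shape [2, 3].
dropUnitDims :: TensorType t => Tensor v t -> Tensor Value t
dropUnitDims = CoreOps.squeeze
```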
+-- +-- Regardless of the value of merge_repeated, if the maximum index of a +-- given time and batch corresponds to the blank, index `(num_classes - +-- 1)`, no new element is emitted. +cTCGreedyDecoder :: Tensor v1 Float -> Tensor v2 Int32 -> (Tensor Value Int64, Tensor Value Int64, Tensor Value Int64, Tensor Value Float) + +-- | BatchToSpace for N-D tensors of type T. +-- +-- This operation reshapes the "batch" dimension 0 into `M + 1` +-- dimensions of shape `block_shape + [batch]`, interleaves these blocks +-- back into the grid defined by the spatial dimensions `[1, ..., M]`, to +-- obtain a result with the same rank as the input. The spatial +-- dimensions of this intermediate result are then optionally cropped +-- according to crops to produce the output. This is the reverse +-- of SpaceToBatch. See below for a precise description. +batchToSpaceND :: (TensorType t, TensorType tblock_shape, OneOf '[Int32, Int64] tblock_shape, TensorType tcrops, OneOf '[Int32, Int64] tcrops) => Tensor v1 t -> Tensor v2 tblock_shape -> Tensor v3 tcrops -> Tensor Value t + +-- | Packs a list of N rank-R tensors into one +-- rank-`(R+1)` tensor. +-- +-- Packs the N tensors in values into a tensor with +-- rank one higher than each tensor in values, by packing them +-- along the axis dimension. Given a list of tensors of shape +-- `(A, B, C)`; +-- +-- if `axis == 0` then the output tensor will have the shape +-- `(N, A, B, C)`. if `axis == 1` then the output tensor will +-- have the shape `(A, N, B, C)`. Etc. +-- +-- For example: +-- +-- ```prettyprint # x is [1, 4] # y is [2, 5] # +-- z is [3, 6] pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # +-- Pack along first dim. pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, +-- 6]] ``` +-- +-- This is the opposite of unpack. +pack :: (TensorType t) => [Tensor v1 t] -> Tensor Value t + +-- | Returns a one-hot tensor. +-- +-- The locations represented by indices in indices take value +-- on_value, while all other locations take value +-- off_value. +-- +-- If the input indices is rank N, the output will have +-- rank `N+1`, The new axis is created at dimension axis +-- (default: the new axis is appended at the end). +-- +-- If indices is a scalar the output shape will be a vector of +-- length depth. 
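[Editor's sketch for the oneHot entry above, before its remaining shape cases: the vector-indices case already pins down a call. TF.constant/TF.scalar from the companion tensorflow-ops package are assumptions.]

```haskell
import Data.Int (Int32)
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))

-- indices = [0, 2, -1, 1], depth = 3, on = 5.0, off = 0.0:
-- a [4, 3] result; the -1 row is all off_value.
oneHotRows :: Tensor Value Float
oneHotRows = CoreOps.oneHot
    (TF.constant (Shape [4]) [0, 2, -1, 1 :: Int32])
    (TF.scalar (3 :: Int32))
    (TF.scalar (5 :: Float))
    (TF.scalar (0 :: Float))
```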
+-- +-- If indices is a vector of length features, the +-- output shape will be: ``` features x depth if axis == -1 depth x +-- features if axis == 0 ``` +-- +-- If indices is a matrix (batch) with shape `[batch, +-- features]`, the output shape will be: ``` batch x features x depth if +-- axis == -1 batch x depth x features if axis == 1 depth x batch x +-- features if axis == 0 ``` +-- +-- Examples ========= +-- +-- Suppose that +-- +-- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 5.0 off_value = 0.0 +-- axis = -1 ``` +-- +-- Then output is `[4 x 3]`: +-- +-- ```output = [5.0 0.0 0.0] // one_hot(0) [0.0 0.0 5.0] // one_hot(2) +-- [0.0 0.0 0.0] // one_hot(-1) [0.0 5.0 0.0] // one_hot(1) ``` +-- +-- Suppose that +-- +-- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 0.0 off_value = 3.0 +-- axis = 0 ``` +-- +-- Then output is `[3 x 4]`: +-- +-- ```output = [0.0 3.0 3.0 3.0] [3.0 3.0 3.0 0.0] [3.0 3.0 3.0 3.0] [3.0 +-- 0.0 3.0 3.0] // ^ one_hot(0) // ^ one_hot(2) // ^ one_hot(-1) // ^ +-- one_hot(1) ``` Suppose that +-- +-- ``` indices = [[0, 2], [1, -1]] depth = 3 on_value = 1.0 off_value = +-- 0.0 axis = -1 ``` +-- +-- Then output is `[2 x 2 x 3]`: +-- +-- ```output = [ [1.0, 0.0, 0.0] // one_hot(0) [0.0, 0.0, 1.0] // +-- one_hot(2) ][ [0.0, 1.0, 0.0] // one_hot(1) [0.0, 0.0, 0.0] // +-- one_hot(-1) ]``` +oneHot :: (TensorType t, TensorType tI, OneOf '[Int32, Int64, Word8] tI) => Tensor v1 tI -> Tensor v2 Int32 -> Tensor v3 t -> Tensor v4 t -> Tensor Value t + +-- | Return the reduction indices for computing gradients of s0 op s1 with +-- broadcast. +-- +-- This is typically used by gradient computations for a broadcasting +-- operation. +broadcastGradientArgs :: (TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value t) + +-- | Returns a batched matrix tensor with new batched diagonal values. +-- +-- Given input and diagonal, this operation returns a +-- tensor with the same shape and values as input, except for +-- the diagonals of the innermost matrices. These will be overwritten by +-- the values in diagonal. The batched matrices must be square. +-- +-- The output is computed as follows: +-- +-- Assume input has `k+1` dimensions `[I, J, K, ..., N, N]` and +-- diagonal has k dimensions `[I, J, K, ..., N]`. Then +-- the output is a tensor of rank `k+1` with dimensions [I, J, K, ..., N, +-- N]` where: +-- +--
+-- * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m ==
+-- n`.
+--
+-- * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m !=
+-- n`.
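[Editor's sketch of the two rules above; the matrixSetDiag signature follows. TF.constant from the companion tensorflow-ops package is an assumption.]

```haskell
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (Shape (..))

-- Overwrite the diagonal of a 3x3 matrix with [9, 9, 9];
-- off-diagonal entries are copied through unchanged.
withNewDiag :: Tensor v Float -> Tensor Value Float
withNewDiag m = CoreOps.matrixSetDiag m (TF.constant (Shape [3]) [9, 9, 9])
```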
    +matrixSetDiag :: (TensorType t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Update '*var' according to the RMSProp algorithm. +-- +-- Note that in dense implement of this algorithm, ms and mom will update +-- even if the grad is zero, but in this sparse implement, ms and mom +-- will not update in iterations the grad is zero. +-- +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = +-- learning_rate * gradient / sqrt(mean_square + epsilon) +-- +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * +-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom +applyRMSProp :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor Value t + +-- | Returns a constant tensor. +const :: (TensorType dtype) => Tensor Value dtype + +-- | Creates or finds a child frame, and makes `data` available to the +-- child frame. +-- +-- This op is used together with Exit to create loops in the +-- graph. The unique frame_name is used by the Executor +-- to identify frames. If is_constant is true, output +-- is a constant in the child frame; otherwise it may be changed in the +-- child frame. At most parallel_iterations iterations are run +-- in parallel in the child frame. +enter :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Debug Identity Op. +-- +-- Provides an identity mapping of the non-Ref type input tensor for +-- debugging. +debugIdentity :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Debug NaN Value Counter Op +-- +-- Counts number of NaNs in the input tensor, for debugging. +debugNanCount :: (TensorType t) => Tensor v1 t -> Tensor Value Int64 + +-- | Batch normalization. +-- +-- This op is deprecated. Prefer `tf.nn.batch_normalization`. +batchNormWithGlobalNormalization :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor Value t +batchMatrixDiag :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Unpacks a given dimension of a rank-R tensor into +-- num rank-`(R-1)` tensors. +-- +-- Unpacks num tensors from value by chipping it along +-- the axis dimension. For example, given a tensor of shape `(A, +-- B, C, D)`; +-- +-- If `axis == 0` then the i'th tensor in output is the slice +-- `value[i, :, :, :]` and each tensor in output will have shape +-- `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike +-- split). +-- +-- If `axis == 1` then the i'th tensor in output is the slice +-- `value[:, i, :, :]` and each tensor in output will have shape +-- `(A, C, D)`. Etc. +-- +-- This is the opposite of pack. +unpack :: (TensorType t) => Int64 -> Tensor v1 t -> [Tensor Value t] + +-- | Split a SparseTensor into num_split tensors along +-- one dimension. +-- +-- If the `shape[split_dim]` is not an integer multiple of +-- num_split. Slices `[0 : shape[split_dim] % num_split]` gets +-- one extra dimension. 
+-- | Split a SparseTensor into num_split tensors along one dimension.
+--
+-- If `shape[split_dim]` is not an integer multiple of num_split, slices `[0 : shape[split_dim] % num_split]` get one extra dimension. For example, if `split_dim = 1` and `num_split = 2` and the input is
+--
+--   input_tensor = shape = [2, 7]
+--   [    a   d e  ]
+--   [b c          ]
+--
+-- Graphically the output tensors are:
+--
+--   output_tensor[0] = shape = [2, 4]
+--   [    a  ]
+--   [b c    ]
+--
+--   output_tensor[1] = shape = [2, 3]
+--   [ d e  ]
+--   [      ]
+sparseSplit :: (TensorType t) => Int64 -> Tensor v1 Int64 -> Tensor v2 Int64 -> Tensor v3 t -> Tensor v4 Int64 -> ([Tensor Value Int64], [Tensor Value t], [Tensor Value Int64])
+
+-- | Pads a tensor with mirrored values.
+--
+-- This operation pads an input with mirrored values according to the paddings you specify. paddings is an integer tensor with shape `[n, 2]`, where n is the rank of input. For each dimension D of input, `paddings[D, 0]` indicates how many values to add before the contents of input in that dimension, and `paddings[D, 1]` indicates how many values to add after the contents of input in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if copy_border is true (if false, respectively).
+--
+-- The padded size of each dimension D of the output is:
+--
+-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+--
+-- For example:
+--
+-- ```prettyprint
+-- # 't' is [[1, 2, 3], [4, 5, 6]].
+-- # 'paddings' is [[1, 1], [2, 2]].
+-- # 'mode' is SYMMETRIC.
+-- # rank of 't' is 2.
+-- pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
+--                       [2, 1, 1, 2, 3, 3, 2]
+--                       [5, 4, 4, 5, 6, 6, 5]
+--                       [5, 4, 4, 5, 6, 6, 5]]
+-- ```
+mirrorPad :: (TensorType t, TensorType tpaddings, OneOf '[Int32, Int64] tpaddings) => Tensor v1 t -> Tensor v2 tpaddings -> Tensor Value t
+batchMatrixDiagPart :: (TensorType t) => Tensor v1 t -> Tensor Value t
+
+-- | Computes gradient of the FractionalMaxPool function.
+fractionalMaxPoolGrad :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 Int64 -> Tensor v5 Int64 -> Tensor Value t
+
+-- | Returns the set of files matching a pattern.
+--
+-- Note that this routine only supports wildcard characters in the basename portion of the pattern, not in the directory portion.
+matchingFiles :: Tensor v1 ByteString -> Tensor Value ByteString
+
+-- | Constructs a tensor by tiling a given tensor.
+--
+-- This operation creates a new tensor by replicating input multiples times. The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, and the values of input are replicated `multiples[i]` times along the ith dimension. For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
+tile :: (TensorType t, TensorType tmultiples, OneOf '[Int32, Int64] tmultiples) => Tensor v1 t -> Tensor v2 tmultiples -> Tensor Value t
+
+-- | Returns the element-wise min of two SparseTensors.
+--
+-- Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
+sparseSparseMinimum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int64 -> Tensor v5 t -> Tensor v6 Int64 -> (Tensor Value Int64, Tensor Value t)
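The padding arithmetic of mirrorPad above is easy to check in one dimension. A minimal plain-Haskell sketch of SYMMETRIC mode (mirrorPad1D is an illustrative helper, not part of these bindings):

```haskell
-- SYMMETRIC mirror padding in one dimension: reflect `before` values
-- from the front and `after` values from the back, borders included.
mirrorPad1D :: Int -> Int -> [a] -> [a]
mirrorPad1D before after xs =
  reverse (take before xs) ++ xs ++ reverse (drop (length xs - after) xs)
```

For example, `mirrorPad1D 2 2 [1, 2, 3]` gives `[2, 1, 1, 2, 3, 3, 2]`, matching the first row of the pad(t, paddings) example above.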
+-- | Generates labels for candidate sampling with a learned unigram distribution.
+--
+-- See explanations of candidate sampling and the data formats at go/candidate-sampling.
+--
+-- For each batch, this op picks a single set of sampled candidate labels.
+--
+-- The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
+allCandidateSampler :: Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)
+
+-- | Forwards the ref tensor `data` to the output port determined by pred.
+--
+-- If pred is true, the `data` input is forwarded to output_true. Otherwise, the data goes to output_false.
+--
+-- See also Switch and Merge.
+refSwitch :: (TensorType t) => Tensor v1 t -> Tensor v2 Bool -> (Tensor Value t, Tensor Value t)
+
+-- | Merges summaries.
+--
+-- This op creates a `Summary` protocol buffer that contains the union of all the values in the input summaries.
+--
+-- When the Op is run, it reports an InvalidArgument error if multiple values in the summaries to merge use the same tag.
+mergeSummary :: [Tensor v1 ByteString] -> Tensor Value ByteString
+
+-- | Returns the truth value of NOT x element-wise.
+logicalNot :: Tensor v1 Bool -> Tensor Value Bool
+
+-- | Gradients for Local Response Normalization.
+lRNGrad :: (TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t
+
+-- | Converts each string in the input Tensor to the specified numeric type.
+--
+-- (Note that int32 overflow results in an error while float overflow results in a rounded value.)
+stringToNumber :: (TensorType out_type, OneOf '[Int32, Float] out_type) => Tensor v1 ByteString -> Tensor Value out_type
+
+-- | Multiply matrix "a" by matrix "b".
+--
+-- The inputs must be two-dimensional matrices and the inner dimension of "a" must match the outer dimension of "b". This op is optimized for the case where at least one of "a" or "b" is sparse. The breakeven for using this versus a dense matrix multiply on one platform was 30% zero values in the sparse matrix.
+sparseMatMul :: (TensorType ta, OneOf '[Word16, Float] ta, TensorType tb, OneOf '[Word16, Float] tb) => Tensor v1 ta -> Tensor v2 tb -> Tensor Value Float
+
+-- | Forwards the value of an available tensor from inputs to output.
+--
+-- Merge waits for at least one of the tensors in inputs to become available. It is usually combined with Switch to implement branching.
+--
+-- Merge forwards the first tensor to become available to output, and sets value_index to its index in inputs.
+merge :: (TensorType t) => [Tensor v1 t] -> (Tensor Value t, Tensor Value Int32)
+
+-- | Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
+--
+-- For an explanation see "Differentiation of the Cholesky algorithm" by Iain Murray http://arxiv.org/abs/1602.07527.
+choleskyGrad :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+batchCholeskyGrad :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | Gather specific elements from the TensorArray into output value.
+--
+-- All elements selected by indices must have the same shape.
+tensorArrayGather :: (TensorType dtype) => Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor v3 Float -> Tensor Value dtype
+
+-- | Resize images to size using nearest neighbor interpolation.
+resizeNearestNeighbor :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t + +-- | Training via negative sampling. +negTrain :: Int64 -> Tensor v1 Float -> Tensor v2 Float -> Tensor v3 Int32 -> Tensor v4 Int32 -> Tensor v5 Float -> ControlNode + +-- | Creates a TensorArray for storing the gradients of values in the given +-- handle. +-- +-- If the given TensorArray gradient already exists, returns a reference +-- to it. +-- +-- Locks the size of the original TensorArray by disabling its dynamic +-- size flag. +-- +--
+--
+-- **A note about the input flow_in:**
+--
+-- The handle flow_in forces the execution of the gradient lookup to occur only after certain other operations have occurred. For example, when the forward TensorArray is dynamically sized, writes to this TensorArray may resize the object. The gradient TensorArray is statically sized based on the size of the forward TensorArray when this operation executes. Furthermore, the size of the forward TensorArray is frozen by this call. As a result, the flow is used to ensure that the call to generate the gradient TensorArray only happens after all writes are executed.
+--
+-- In the case of dynamically sized TensorArrays, gradient computation should only be performed on read operations that have themselves been chained via flow to occur only after all writes have executed. That way the final size of the forward TensorArray is known when this operation is called.
+--
+-- **A note about the source attribute:**
+--
+-- TensorArray gradient calls use an accumulator TensorArray object. If multiple gradients are calculated and run in the same session, the multiple gradient nodes may accidentally flow through the same accumulator TensorArray. This double counts and generally breaks the TensorArray gradient flow.
+--
+-- The solution is to identify which gradient call this particular TensorArray gradient is being called in. This is performed by identifying a unique string (e.g. "gradients", "gradients_1", ...) from the input gradient Tensor's name. This string is used as a suffix when creating the TensorArray gradient object here (the attribute source).
+--
+-- The attribute source is added as a suffix to the forward TensorArray's name when performing the creation / lookup, so that each separate gradient calculation gets its own TensorArray accumulator.
+tensorArrayGrad :: Tensor v1 ByteString -> Tensor v2 Float -> Tensor Value ByteString
+
+-- | Outputs a Summary protocol buffer with audio.
+--
+-- The summary has up to max_outputs summary values containing audio. The audio is built from tensor which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of sample_rate.
+--
+-- The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:
+--
+-- * If max_outputs is 1, the summary value tag is '*tag*/audio'.
+--
+-- * If max_outputs is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+--
+audioSummary :: Float -> Tensor v1 ByteString -> Tensor v2 Float -> Tensor Value ByteString
+
+-- | Does nothing. Only useful as a placeholder for control edges.
+noOp :: ControlNode
+
+-- | Makes its input available to the next iteration.
+nextIteration :: (TensorType t) => Tensor v1 t -> Tensor Value t
+
+-- | Computes softplus gradients for a softplus operation.
+softplusGrad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | Computes the singular value decompositions of one or more matrices.
+--
+-- Computes the SVD of each inner matrix in input such that `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
+--
+-- ```prettyprint
+-- # a is a tensor containing a batch of matrices.
+-- # s is a tensor of singular values for each matrix.
+-- # u is the tensor containing the left singular vectors for each matrix.
+-- # v is the tensor containing the right singular vectors for each matrix.
+-- s, u, v = svd(a)
+-- s, _, _ = svd(a, compute_uv=False)
+-- ```
+svd :: (TensorType t, OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value t, Tensor Value t)
+
+-- | Convert one or more images from HSV to RGB.
+--
+-- Outputs a tensor of the same shape as the images tensor, containing the RGB value of the pixels. The output is only well defined if the values in images are in `[0,1]`.
+--
+-- See rgb_to_hsv for a description of the HSV encoding.
+hSVToRGB :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Outputs random values from a normal distribution. The parameters may each be a scalar which applies to the entire output, or a vector of length shape[0] which stores the parameters for each batch.
+parameterizedTruncatedNormal :: (TensorType t, OneOf '[Int32, Int64] t, TensorType dtype, OneOf '[Word16, Double, Float] dtype) => Tensor v1 t -> Tensor v2 dtype -> Tensor v3 dtype -> Tensor v4 dtype -> Tensor v5 dtype -> Tensor Value dtype
+
+-- | Computes square of x element-wise.
+--
+-- I.e., \(y = x * x = x^2\).
+square :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Computes exponential linear: `exp(features) - 1` if < 0, features otherwise.
+--
+-- See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
+elu :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Outputs all keys and values in the table.
+lookupTableExport :: (TensorType tkeys, TensorType tvalues) => Tensor v1 ByteString -> (Tensor Value tkeys, Tensor Value tvalues)
+
+-- | Computes the number of elements in the given table.
+lookupTableSize :: Tensor v1 ByteString -> Tensor Value Int64
+
+-- | Computes gradients of the average pooling function.
+avgPoolGrad :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor Value t
+
+-- | Computes the ids of the positions in sampled_candidates that match true_labels.
+--
+-- When doing log-odds NCE, the result of this op should be passed through a SparseToDense op, then added to the logits of the sampled candidates. This has the effect of removing the sampled labels that match the true labels by making the classifier sure that they are sampled labels.
+computeAccidentalHits :: Int64 -> Tensor v1 Int64 -> Tensor v2 Int64 -> (Tensor Value Int32, Tensor Value Int64, Tensor Value Float) + +-- | Calculates the CTC Loss (log probability) for each batch entry. Also +-- calculates +-- +-- the gradient. This class performs the softmax operation for you, so +-- inputs should be e.g. linear projections of outputs by an LSTM. +cTCLoss :: Tensor v1 Float -> Tensor v2 Int64 -> Tensor v3 Int32 -> Tensor v4 Int32 -> (Tensor Value Float, Tensor Value Float) + +-- | Performs 3D average pooling on the input. +avgPool3D :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Computes the reciprocal of x element-wise. +-- +-- I.e., \(y = 1 / x\). +inv :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Pop the element at the top of the stack. +stackPop :: (TensorType elem_type) => Tensor v1 ByteString -> Tensor Value elem_type + +-- | A queue that produces elements in first-in first-out order. +-- +-- Variable-size shapes are allowed by setting the corresponding shape +-- dimensions to 0 in the shape attr. In this case DequeueMany will pad +-- up to the maximum size of any given element in the minibatch. See +-- below for details. +paddingFIFOQueue :: Tensor Value ByteString +batchSelfAdjointEigV2 :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value t) +batchMatrixTriangularSolve :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t +batchMatrixSolveLs :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 Double -> Tensor Value t +batchSvd :: (TensorType t, OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value t, Tensor Value t) + +-- | Outputs a Summary protocol buffer with a tensor. +tensorSummary :: (TensorType t) => Tensor v1 t -> Tensor Value ByteString + +-- | Computes softmax cross entropy cost and gradients to backpropagate. +-- +-- Unlike SoftmaxCrossEntropyWithLogits, this operation does not +-- accept a matrix of label probabilities, but rather a single label per +-- row of features. This label is considered to have probability 1.0 for +-- the given row. +-- +-- Inputs are the logits, not probabilities. +sparseSoftmaxCrossEntropyWithLogits :: (TensorType t, OneOf '[Word16, Double, Float] t, TensorType tlabels, OneOf '[Int32, Int64] tlabels) => Tensor v1 t -> Tensor v2 tlabels -> (Tensor Value t, Tensor Value t) + +-- | Performs max pooling on the input and outputs both max values and +-- indices. +-- +-- The indices in argmax are flattened, so that a maximum value +-- at position `[b, y, x, c]` becomes flattened index `((b * height + y) +-- * width + x) * channels + c`. +maxPoolWithArgmax :: (TensorType t, OneOf '[Word16, Float] t, TensorType targmax, OneOf '[Int32, Int64] targmax) => Tensor v1 t -> (Tensor Value t, Tensor Value targmax) + +-- | Compute the 1-dimensional discrete Fourier Transform over the +-- inner-most +-- +-- dimension of input. +fFT :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) + +-- | Outputs a Summary protocol buffer with a histogram. +-- +-- The generated `Summary` has one summary value containing a +-- histogram for values. +-- +-- This op reports an InvalidArgument error if any value is not +-- finite. 
+histogramSummary :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor Value ByteString + +-- | Pads a tensor with zeros. +-- +-- This operation pads a input with zeros according to the +-- paddings you specify. paddings is an integer tensor +-- with shape `[Dn, 2]`, where n is the rank of input. For each +-- dimension D of input, `paddings[D, 0]` indicates how many +-- zeros to add before the contents of input in that dimension, +-- and `paddings[D, 1]` indicates how many zeros to add after the +-- contents of input in that dimension. +-- +-- The padded size of each dimension D of the output is: +-- +-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` +-- +-- For example: +-- +-- ```prettyprint # t is [[1, 1], [2, 2]] # paddings is +-- [[1, 1], [2, 2]] # rank of t is 2 pad(t, paddings) ==> +-- [[0, 0, 0, 0, 0, 0] [0, 0, 1, 1, 0, 0] [0, 0, 2, 2, 0, 0] [0, 0, 0, 0, +-- 0, 0]] ``` +pad :: (TensorType t, TensorType tpaddings, OneOf '[Int32, Int64] tpaddings) => Tensor v1 t -> Tensor v2 tpaddings -> Tensor Value t +batchIFFT3D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) + +-- | Outputs a Summary protocol buffer with images. +-- +-- The summary has up to max_images summary values containing +-- images. The images are built from tensor which must be 4-D +-- with shape `[batch_size, height, width, channels]` and where +-- channels can be: +-- +--
+--
+-- * 1: tensor is interpreted as Grayscale.
+--
+-- * 3: tensor is interpreted as RGB.
+--
+-- * 4: tensor is interpreted as RGBA.
+--
    +-- +-- The images have the same number of channels as the input tensor. For +-- float input, the values are normalized one image at a time to fit in +-- the range `[0, 255]`. uint8 values are unchanged. The op uses +-- two different normalization algorithms: +-- +--
+--
+-- * If the input values are all positive, they are rescaled so the largest one is 255.
+--
+-- * If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255 (see the sketch below).
+--
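These two normalization rules translate directly into plain Haskell. A minimal sketch for one flattened float image (normalizeImage is an illustrative name, not part of these bindings; the epsilon guard is an assumption to avoid division by zero):

```haskell
import Data.Word (Word8)

-- Plain-Haskell reading of the two rules above: all-positive inputs are
-- scaled so the maximum lands at 255; otherwise 0.0 is shifted to 127 and
-- values are rescaled so the minimum hits 0 or the maximum hits 255.
normalizeImage :: [Float] -> [Word8]
normalizeImage [] = []
normalizeImage xs
  | lo >= 0   = [ round (x * (255 / hi))   | x <- xs ]
  | otherwise = [ round (127 + x * scale) | x <- xs ]
  where
    lo    = minimum xs
    hi    = max (maximum xs) 1e-12            -- guard against division by zero
    scale = min (127 / negate lo) (128 / hi)  -- min at 0 or max at 255
```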
    +-- +-- The tag argument is a scalar Tensor of type +-- string. It is used to build the tag of the summary +-- values: +-- +--
+--
+-- * If max_images is 1, the summary value tag is '*tag*/image'.
+--
+-- * If max_images is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+--
+-- The bad_color argument is the color to use in the generated images for non-finite input values. It is a uint8 1-D tensor of length channels. Each element must be in the range `[0, 255]` (it represents the value of a pixel in the output image). Non-finite values in the input tensor are replaced by this tensor in the output image. The default value is the color red.
+imageSummary :: (TensorType t, OneOf '[Word16, Word8, Float] t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor Value ByteString
+
+-- | Computes the sum along segments of a tensor.
+--
+-- Read the section on Segmentation for an explanation of segments.
+--
+-- Computes a tensor such that \(output_i = sum_j data_j\) where sum is over j such that `segment_ids[j] == i`.
+--
+-- (Figure: ../../images/SegmentSum.png)
+segmentSum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor Value t
+
+-- | JPEG-encode an image.
+--
+-- image is a 3-D uint8 Tensor of shape `[height, width, channels]`.
+--
+-- The attr format can be used to override the color format of the encoded output. Values can be:
+--
+-- * `''`: Use a default format based on the number of channels in the image.
+--
+-- * grayscale: Output a grayscale JPEG image. The channels dimension of image must be 1.
+--
+-- * rgb: Output an RGB JPEG image. The channels dimension of image must be 3 (see the sketch below).
+--
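The compatibility constraint between format and the channels dimension can be phrased as a tiny predicate. A hedged plain-Haskell sketch (jpegFormatOk is a hypothetical helper, not part of these bindings):

```haskell
-- Does the requested format agree with the image's channel count,
-- per the three documented values above?
jpegFormatOk :: String -> Int -> Bool
jpegFormatOk ""          _ = True   -- default format is picked from channels
jpegFormatOk "grayscale" c = c == 1
jpegFormatOk "rgb"       c = c == 3
jpegFormatOk _           _ = False
```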
    +-- +-- If format is not specified or is the empty string, a default +-- format is picked in function of the number of channels in +-- image: +-- +--
+--
+-- * 1: Output a grayscale image.
+--
+-- * 3: Output an RGB image.
+--
    +encodeJpeg :: Tensor v1 Word8 -> Tensor Value ByteString + +-- | Gradients for batch normalization. +-- +-- This op is deprecated. See `tf.nn.batch_normalization`. +batchNormWithGlobalNormalizationGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> (Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t) + +-- | Adds bias to value. +-- +-- This is a deprecated version of BiasAdd and will be soon removed. +-- +-- This is a special case of `tf.add` where bias is restricted +-- to be 1-D. Broadcasting is supported, so value may have any +-- number of dimensions. +biasAddV1 :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes the inverse permutation of a tensor. +-- +-- This operation computes the inverse of an index permutation. It takes +-- a 1-D integer tensor x, which represents the indices of a +-- zero-based array, and swaps each value with its index position. In +-- other words, for an output tensor y and an input tensor +-- x, this operation computes the following: +-- +-- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` +-- +-- The values must include 0. There can be no duplicate values or +-- negative values. +-- +-- For example: +-- +-- ```prettyprint # tensor x is [3, 4, 0, 2, 1] +-- invert_permutation(x) ==> [2, 4, 3, 0, 1] ``` +invertPermutation :: (TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 t -> Tensor Value t + +-- | Gradient op for MirrorPad op. This op folds a mirror-padded +-- tensor. +-- +-- This operation folds the padded areas of input by +-- MirrorPad according to the paddings you specify. +-- paddings must be the same as paddings argument given +-- to the corresponding MirrorPad op. +-- +-- The folded size of each dimension D of the output is: +-- +-- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)` +-- +-- For example: +-- +-- ```prettyprint # t is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. # +-- paddings is [[0, 1]], [0, 1]]. # mode is SYMMETRIC. +-- # rank of t is 2. pad(t, paddings) ==> [[ 1, 5] [11, 28]] +-- ``` +mirrorPadGrad :: (TensorType t, TensorType tpaddings, OneOf '[Int32, Int64] tpaddings) => Tensor v1 t -> Tensor v2 tpaddings -> Tensor Value t + +-- | Reverses specific dimensions of a tensor. +-- +-- Given a tensor, and a bool tensor dims +-- representing the dimensions of tensor, this operation +-- reverses each dimension i of tensor where `dims[i]` is +-- True. +-- +-- tensor can have up to 8 dimensions. The number of dimensions +-- of tensor must equal the number of elements in dims. 
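The defining rule given for invertPermutation above, `y[x[i]] = i`, is one sortOn away in plain Haskell (invertPerm is an illustrative name, not part of these bindings):

```haskell
import Data.List (sortOn)

-- invertPermutation's rule y[x[i]] = i: pair each target position with
-- its source index, then read the indices back in target order.
invertPerm :: [Int] -> [Int]
invertPerm xs = map snd (sortOn fst (zip xs [0 ..]))
```

`invertPerm [3, 4, 0, 2, 1]` evaluates to `[2, 4, 3, 0, 1]`, matching the invert_permutation example above.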
+-- In other words: +-- +-- `rank(tensor) = size(dims)` +-- +-- For example: +-- +-- ```prettyprint # tensor t is [[[[ 0, 1, 2, 3], # [ 4, 5, 6, +-- 7], # [ 8, 9, 10, 11]], # [[12, 13, 14, 15], # [16, 17, 18, 19], # +-- [20, 21, 22, 23]]]] # tensor t shape is [1, 2, 3, 4] +-- +-- # dims is [False, False, False, True] reverse(t, dims) ==> +-- [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [ 11, 10, 9, 8]], [[15, 14, 13, 12], +-- [19, 18, 17, 16], [23, 22, 21, 20]]]] +-- +-- # dims is [False, True, False, False] reverse(t, dims) ==> +-- [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23] [[ 0, 1, 2, +-- 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]] +-- +-- # dims is [False, False, True, False] reverse(t, dims) ==> +-- [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]] [[20, 21, 22, 23], [16, +-- 17, 18, 19], [12, 13, 14, 15]]]] ``` +reverse :: (TensorType t, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Bool -> Tensor Value t + +-- | Computes a 2-D convolution given 4-D input and filter +-- tensors. +-- +-- Given an input tensor of shape `[batch, in_height, in_width, +-- in_channels]` and a filter / kernel tensor of shape `[filter_height, +-- filter_width, in_channels, out_channels]`, this op performs the +-- following: +-- +--
+--
+-- 1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`.
+--
+-- 2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`.
+--
+-- 3. For each patch, right-multiplies the filter matrix and the image patch vector (see the sketch below).
+--
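A toy model of these three steps for one batch element and a single input and output channel, with unit strides and VALID padding; plain Haskell lists stand in for tensors, and the general NHWC formula follows in the next paragraph:

```haskell
-- Direct single-channel reading of conv2D: each output element is the
-- dot product of the filter with one image patch (steps 2 and 3 above).
conv2dValid :: [[Double]] -> [[Double]] -> [[Double]]
conv2dValid input filt =
  [ [ sum [ (input !! (i + di)) !! (j + dj) * (filt !! di) !! dj
          | di <- [0 .. fh - 1], dj <- [0 .. fw - 1] ]
    | j <- [0 .. iw - fw] ]
  | i <- [0 .. ih - fh] ]
  where
    ih = length input
    iw = length (head input)
    fh = length filt
    fw = length (head filt)
```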
+-- In detail, with the default NHWC format,
+--
+-- output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k]
+--
+-- Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
+conv2D :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | Computes the gradients of convolution with respect to the input.
+conv2DBackpropInput :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor v3 t -> Tensor Value t
+
+-- | Produce a string tensor that encodes the state of a Reader.
+--
+-- Not all Readers support being serialized, so this can produce an Unimplemented error.
+readerSerializeState :: Tensor v1 ByteString -> Tensor Value ByteString
+
+-- | Returns a tensor that may be mutated, but only persists within a single step.
+--
+-- This is an experimental op for internal use only and it is possible to use this op in unsafe ways. DO NOT USE unless you fully understand the risks.
+--
+-- It is the caller's responsibility to ensure that ref is eventually passed to a matching DestroyTemporaryVariable op after all other uses have completed.
+--
+-- Outputs a ref to the tensor state so it may be read or modified.
+--
+-- E.g.:
+--   var = state_ops._temporary_variable([1, 2], types.float_)
+--   var_name = var.op.name
+--   var = state_ops.assign(var, [[4.0, 5.0]])
+--   var = state_ops.assign_add(var, [[6.0, 7.0]])
+--   final = state_ops._destroy_temporary_variable(var, var_name=var_name)
+temporaryVariable :: (TensorType dtype) => Tensor Value dtype
+
+-- | Extracts crops from the input image tensor and bilinearly resizes them (possibly with aspect ratio change) to a common output size specified by crop_size. This is more general than the crop_to_bounding_box op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change.
+--
+-- Returns a tensor with crops from the input image at positions defined at the bounding box locations in boxes. The cropped boxes are all resized (with bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.
+cropAndResize :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Float -> Tensor v3 Int32 -> Tensor v4 Int32 -> Tensor Value Float
+
+-- | Computes gradients of the maxpooling function.
+maxPoolGrad :: (TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t
+
+-- | Performs a resize and padding as a preprocess during a convolution.
+--
+-- It's often possible to do spatial transformations more efficiently as part of the packing stage of a convolution, so this op allows for an optimized implementation where these stages are fused together. This prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, and we can get some latency gains by merging the transformation calculations. The data_format attribute for Conv2D isn't supported by this op, and defaults to NHWC order. Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel.
+-- This is because this operator is primarily an optimization to minimize memory usage.
+fusedResizeAndPadConv2D :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 Int32 -> Tensor v4 t -> Tensor Value t
+
+-- | Outputs random values from a uniform distribution.
+--
+-- The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.
+randomUniform :: (TensorType t, OneOf '[Int32, Int64] t, TensorType dtype, OneOf '[Word16, Double, Float] dtype) => Tensor v1 t -> Tensor Value dtype
+
+-- | Computes a 2-D depthwise convolution given 4-D input and filter tensors.
+--
+-- Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]`, containing in_channels convolutional filters of depth 1, depthwise_conv2d applies a different filter to each input channel (expanding from 1 channel to channel_multiplier channels for each), then concatenates the results together. Thus, the output has `in_channels * channel_multiplier` channels.
+--
+-- for k in 0..in_channels-1
+--   for q in 0..channel_multiplier-1
+--     output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * filter[di, dj, k, q]
+--
+-- Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
+depthwiseConv2dNative :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | var: Should be from a Variable().
+sparseApplyAdadelta :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 tindices -> Tensor Value t
+
+-- | Computes the gradients of depthwise convolution with respect to the filter.
+depthwiseConv2dNativeBackpropFilter :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Value t
+
+-- | Computes a 3-D convolution given 5-D input and filter tensors.
+--
+-- In signal processing, cross-correlation is a measure of similarity of two waveforms as a function of a time-lag applied to one of them. This is also known as a sliding dot product or sliding inner-product.
+--
+-- Our Conv3D implements a form of cross-correlation.
+conv3D :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | Returns the truth value of (x >= y) element-wise.
+--
+-- **NOTE**: GreaterEqual supports broadcasting. More about broadcasting here
+--
    +greaterEqual :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool + +-- | Adds up a SparseTensor and a dense Tensor, using these special rules: +-- +--
+--
+-- 1. Broadcasts the dense side to have the same shape as the sparse side, if eligible;
+--
+-- 2. Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition (see the sketch below).
+--
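For the 1-D case (ignoring rule 1's broadcasting), rule 2 reduces to a single zipWith. A plain-Haskell sketch of the semantics (sparseDenseAdd1D is illustrative, not part of these bindings):

```haskell
-- Only the dense values addressed by the sparse indices take part in
-- the add; the result is the new non-zero values, as documented below.
sparseDenseAdd1D :: [Int] -> [Double] -> [Double] -> [Double]
sparseDenseAdd1D indices values dense =
  zipWith (\i v -> v + dense !! i) indices values
```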
    +-- +-- By these rules, the result is a logical SparseTensor with exactly the +-- same indices and shape, but possibly with different non-zero values. +-- The output of this Op is the resultant non-zero values. +sparseDenseCwiseAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 t -> Tensor Value t + +-- | Computes the gradients of 3-D convolution with respect to the filter. +conv3DBackpropFilter :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t + +-- | Computes the gradients of 3-D convolution with respect to the input. +conv3DBackpropInputV2 :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor v3 t -> Tensor Value t + +-- | Returns element-wise remainder of division. +-- +--
+--
+-- **NOTE**: Mod supports broadcasting. More about broadcasting here
+--
+mod :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | Forwards the value of an available tensor from inputs to output.
+--
+-- Merge waits for at least one of the tensors in inputs to become available. It is usually combined with Switch to implement branching.
+--
+-- Merge forwards the first tensor to become available to output, and sets value_index to its index in inputs.
+refMerge :: (TensorType t) => [Tensor v1 t] -> (Tensor Value t, Tensor Value Int32)
+
+-- | Computes the gradients of 3-D convolution with respect to the filter.
+conv3DBackpropFilterV2 :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Value t
+
+-- | Serialize an N-minibatch SparseTensor into an `[N, 3]` string Tensor.
+--
+-- The SparseTensor must have rank R greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the SparseTensor must be sorted in increasing order of this first dimension. The serialized SparseTensor objects going into each row of serialized_sparse will have rank `R-1`.
+--
+-- The minibatch size N is extracted from `sparse_shape[0]`.
+serializeManySparse :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor Value ByteString
+
+-- | Computes gradients of average pooling function.
+avgPool3DGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor Value t
+
+-- | Computes gradients of max pooling function.
+maxPool3DGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Float -> Tensor v2 Float -> Tensor v3 t -> Tensor Value t
+
+-- | Computes the sum of elements across dimensions of a SparseTensor.
+--
+-- This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In particular, this Op also returns a dense Tensor instead of a sparse one.
+--
+-- Reduces sp_input along the dimensions given in reduction_axes. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_axes. If keep_dims is true, the reduced dimensions are retained with length 1.
+--
+-- If reduction_axes has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.
+sparseReduceSum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int32 -> Tensor Value t
+
+-- | Computes rectified linear: `max(features, 0)`.
+relu :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | L2 Loss.
+--
+-- Computes half the L2 norm of a tensor without the sqrt:
+--
+-- output = sum(t ** 2) / 2
+l2Loss :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Restore a reader to a previously saved state.
+--
+-- Not all Readers support being restored, so this can produce an Unimplemented error.
+readerRestoreState :: Tensor v1 ByteString -> Tensor v2 ByteString -> ControlNode + +-- | Returns the shape of a tensor. +-- +-- This operation returns a 1-D integer tensor representing the shape of +-- input. +-- +-- For example: +-- +-- ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], +-- [4, 4, 4]]] shape(t) ==> [2, 2, 3] ``` +shape :: (TensorType t, TensorType out_type, OneOf '[Int32, Int64] out_type) => Tensor v1 t -> Tensor Value out_type + +-- | Computes softmax cross entropy cost and gradients to backpropagate. +-- +-- Inputs are the logits, not probabilities. +softmaxCrossEntropyWithLogits :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value t) + +-- | Performs max pooling on the input. +maxPool :: (TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Computes the gradient of morphological 2-D dilation with respect to +-- the input. +dilation2DBackpropInput :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t + +-- | Returns the truth value of (x == y) element-wise. +-- +--
+--
+-- **NOTE**: Equal supports broadcasting. More about broadcasting here
+--
    +equal :: (TensorType t, OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool + +-- | Computes the gradient of morphological 2-D dilation with respect to +-- the filter. +dilation2DBackpropFilter :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t + +-- | Computes rectified linear gradients for a Relu operation. +reluGrad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes rectified linear 6: `min(max(features, 0), 6)`. +relu6 :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Resize images to size using bicubic interpolation. +-- +-- Input images can be of different types but output images are always +-- float. +resizeBicubic :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value Float + +-- | Computes rectified linear 6 gradients for a Relu6 operation. +relu6Grad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Multiply SparseTensor (of rank 2) A by dense matrix B. +-- +-- No validity checking is performed on the indices of A. However, the +-- following input format is recommended for optimal behavior: +-- +-- if adjoint_a == false: A should be sorted in lexicographically +-- increasing order. Use SparseReorder if you're not sure. if adjoint_a +-- == true: A should be sorted in order of increasing dimension 1 (i.e., +-- "column major" order instead of "row major" order). +sparseTensorDenseMatMul :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 t -> Tensor Value t + +-- | Computes softplus: `log(exp(features) + 1)`. +softplus :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Multiplies slices of two tensors in batches. +-- +-- Multiplies all slices of Tensor x and y (each +-- slice can be viewed as an element of a batch), and arranges the +-- individual results in a single output tensor of the same batch size. +-- Each of the individual slices can optionally be adjointed (to adjoint +-- a matrix means to transpose and conjugate it) before multiplication by +-- setting the adj_x or adj_y flag to True, +-- which are by default False. +-- +-- The input tensors x and y are 3-D or higher with +-- shape `[..., r_x, c_x]` and `[..., r_y, c_y]`. +-- +-- The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, +-- where: +-- +-- r_o = c_x if adj_x else r_x c_o = r_y if adj_y else c_y +-- +-- It is computed as: +-- +-- output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) +batchMatMul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes softsign gradients for a softsign operation. +softsignGrad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Returns the truth value of (x <= y) element-wise. +-- +--
+--
+-- **NOTE**: LessEqual supports broadcasting. More about broadcasting here
+--
    +lessEqual :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool + +-- | Computes log softmax activations. +-- +-- For each batch i and class j we have +-- +-- logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) +logSoftmax :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Says whether the targets are in the top K predictions. +-- +-- This outputs a batch_size bool array, an entry `out[i]` is +-- true if the prediction for the target class is among the top +-- k predictions among all predictions for example i. +-- Note that the behavior of InTopK differs from the +-- TopK op in its handling of ties; if multiple classes have the +-- same prediction value and straddle the top-k boundary, all of +-- those classes are considered to be in the top k. +-- +-- More formally, let +-- +-- \(predictions_i\) be the predictions for all classes for example +-- i, \(targets_i\) be the target class for example i, +-- \(out_i\) be the output for example i, +-- +-- $$out_i = predictions_{i, targets_i} in +-- TopKIncludingTies(predictions_i)$$ +inTopK :: (TensorType t, OneOf '[Int32, Int64] t) => Int64 -> Tensor v1 Float -> Tensor v2 t -> Tensor Value Bool + +-- | Returns a batched diagonal tensor with a given batched diagonal +-- values. +-- +-- Given a diagonal, this operation returns a tensor with the +-- diagonal and everything else padded with zeros. The diagonal +-- is computed as follows: +-- +-- Assume diagonal has k dimensions `[I, J, K, ..., +-- N]`, then the output is a tensor of rank `k+1` with dimensions [I, J, +-- K, ..., N, N]` where: +-- +-- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. +-- +-- For example: +-- +-- ```prettyprint # diagonal is [[1, 2, 3, 4], [5, 6, 7, 8]] +-- +-- and diagonal.shape = (2, 4) +-- +-- tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, +-- 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, +-- 8]]] +-- +-- which has shape (2, 4, 4) ``` +matrixDiag :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Performs 3D max pooling on the input. +maxPool3D :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Finds values and indices of the k largest elements for the +-- last dimension. +-- +-- If the input is a vector (rank-1), finds the k largest +-- entries in the vector and outputs their values and indices as vectors. +-- Thus `values[j]` is the j-th largest entry in input, +-- and its index is `indices[j]`. +-- +-- For matrices (resp. higher rank input), computes the top k +-- entries in each row (resp. vector along the last dimension). Thus, +-- +-- values.shape = indices.shape = input.shape[:-1] + [k] +-- +-- If two elements are equal, the lower-index element appears first. +-- +-- If k varies dynamically, use TopKV2 below. +topK :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Int64 -> Tensor v1 t -> (Tensor Value t, Tensor Value Int32) + +-- | Finds values and indices of the k largest elements for the +-- last dimension. +-- +-- If the input is a vector (rank-1), finds the k largest +-- entries in the vector and outputs their values and indices as vectors. +-- Thus `values[j]` is the j-th largest entry in input, +-- and its index is `indices[j]`. +-- +-- For matrices (resp. 
higher rank input), computes the top k entries in each row (resp. vector along the last dimension). Thus,
+--
+-- values.shape = indices.shape = input.shape[:-1] + [k]
+--
+-- If two elements are equal, the lower-index element appears first.
+--
+-- This is the same as TopK, but takes k as an input rather than an attr.
+topKV2 :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> (Tensor Value t, Tensor Value Int32)
+
+-- | Performs fractional max pooling on the input.
+--
+-- Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer.
+--
+-- The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries.
+--
+-- First we define the following:
+--
+-- 1. input_row_length : the number of rows from the input set
+--
+-- 2. output_row_length : which will be smaller than the input
+--
+-- 3. alpha = input_row_length / output_row_length : our reduction ratio
+--
+-- 4. K = floor(alpha)
+--
+-- 5. row_pooling_sequence : this is the result list of pool boundary rows
+--
    +-- +-- Then, row_pooling_sequence should satisfy: +-- +--
+--
+-- 1. a[0] = 0 : the first value of the sequence is 0
+--
+-- 2. a[end] = input_row_length : the last value of the sequence is the size
+--
+-- 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
+--
+-- 4. length(row_pooling_sequence) = output_row_length+1 (see the sketch below)
+--
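The four conditions above can be checked mechanically. A plain-Haskell sketch (validPoolingSequence is an illustrative name, not part of these bindings; it assumes a non-empty boundary list):

```haskell
-- Check a candidate row_pooling_sequence `a` against the four
-- conditions above, where k = floor alpha.
validPoolingSequence :: Int -> Int -> Int -> [Int] -> Bool
validPoolingSequence inputRowLength outputRowLength k a =
  head a == 0                                   -- condition 1
    && last a == inputRowLength                 -- condition 2
    && all (\(x, y) -> let d = y - x
                       in d == k || d == k + 1)
           (zip a (tail a))                     -- condition 3
    && length a == outputRowLength + 1          -- condition 4
```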
    +-- +-- For more details on fractional max pooling, see this paper: +-- Benjamin Graham, Fractional Max-Pooling +fractionalMaxPool :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value Int64, Tensor Value Int64) + +-- | Copy a tensor setting everything outside a central band in each +-- innermost matrix +-- +-- to zero. +-- +-- The band part is computed as follows: Assume input +-- has k dimensions `[I, J, K, ..., M, N]`, then the output is a +-- tensor with the same shape where +-- +-- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, +-- n]`. +-- +-- The indicator function 'in_band(m, n)` is one if `(num_lower < 0 || +-- (m-n) <= num_lower)) && (num_upper < 0 || (n-m) <= +-- num_upper)`, and zero otherwise. +-- +-- For example: +-- +-- ```prettyprint # if input is [[ 0, 1, 2, 3] [-1, 0, 1, 2] +-- [-2, -1, 0, 1] [-3, -2, -1, 0]], +-- +-- tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] [-1, 0, 1, 2] +-- [ 0, -1, 0, 1] [ 0, 0, -1, 0]], +-- +-- tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] [-1, 0, 1, 0] +-- [-2, -1, 0, 1] [ 0, -2, -1, 0]] ``` +-- +-- Useful special cases: +-- +-- ```prettyprint tf.matrix_band_part(input, 0, -1) ==> Upper +-- triangular part. tf.matrix_band_part(input, -1, 0) ==> Lower +-- triangular part. tf.matrix_band_part(input, 0, 0) ==> Diagonal. ``` +matrixBandPart :: (TensorType t) => Tensor v1 t -> Tensor v2 Int64 -> Tensor v3 Int64 -> Tensor Value t + +-- | Reinterpret the bytes of a string as a vector of numbers. +decodeRaw :: (TensorType out_type, OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] out_type) => Tensor v1 ByteString -> Tensor Value out_type + +-- | Convert JSON-encoded Example records to binary protocol buffer +-- strings. +-- +-- This op translates a tensor containing Example records, encoded using +-- the standard JSON mapping, into a tensor containing the same +-- records encoded as binary protocol buffers. The resulting tensor can +-- then be fed to any of the other Example-parsing ops. +decodeJSONExample :: Tensor v1 ByteString -> Tensor Value ByteString + +-- | Outputs random values from a truncated normal distribution. +-- +-- The generated values follow a normal distribution with mean 0 and +-- standard deviation 1, except that values whose magnitude is more than +-- 2 standard deviations from the mean are dropped and re-picked. +truncatedNormal :: (TensorType t, OneOf '[Int32, Int64] t, TensorType dtype, OneOf '[Word16, Double, Float] dtype) => Tensor v1 t -> Tensor Value dtype + +-- | Randomly shuffles a tensor along its first dimension. +-- +-- The tensor is shuffled along dimension 0, such that each `value[j]` is +-- mapped to one and only one `output[i]`. For example, a mapping that +-- might occur for a 3x2 tensor is: +-- +-- ```prettyprint [[1, 2], [[5, 6], [3, 4], ==> [1, 2], [5, 6]] [3, +-- 4]] ``` +randomShuffle :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Draws samples from a multinomial distribution. +multinomial :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value Int64 + +-- | Outputs random values from the Gamma distribution(s) described by +-- alpha. +-- +-- This op uses the algorithm by Marsaglia et al. to acquire samples via +-- transformation-rejection from pairs of uniform and normal random +-- variables. 
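The in_band indicator from the matrixBandPart entry above translates directly into Haskell (inBand is an illustrative helper, not part of these bindings):

```haskell
-- matrixBandPart's indicator: keep position (m, n) when it lies within
-- num_lower subdiagonals and num_upper superdiagonals; a negative bound
-- means "keep everything on that side".
inBand :: Int -> Int -> Int -> Int -> Bool
inBand numLower numUpper m n =
  (numLower < 0 || m - n <= numLower) && (numUpper < 0 || n - m <= numUpper)
```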
See http://dl.acm.org/citation.cfm?id=358414 +randomGamma :: (TensorType s, OneOf '[Int32, Int64] s, TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 s -> Tensor v2 t -> Tensor Value t + +-- | Add all input tensors element wise. +addN :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => [Tensor v1 t] -> Tensor Value t + +-- | Computes the maximum of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +max :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t + +-- | A graph node which represents a return value of a function. +_Retval :: (TensorType t) => Int64 -> Tensor v1 t -> ControlNode + +-- | Destroys the temporary variable and returns its final value. +-- +-- Sets output to the value of the Tensor pointed to by ref, +-- then destroys the temporary variable called var_name. All +-- other uses of ref *must* have executed before this op. This +-- is typically achieved by chaining the ref through each assign op, or +-- by using control dependencies. +-- +-- Outputs the final value of the tensor pointed to by ref. +destroyTemporaryVariable :: (TensorType t) => Tensor v1 t -> Tensor Value t + +-- | Cast x of type SrcT to y of DstT. +cast :: (TensorType dstT, TensorType srcT) => Tensor v1 srcT -> Tensor Value dstT + +-- | Increments ref until it reaches limit. +-- +-- This operation outputs "ref" after the update is done. This makes it +-- easier to chain operations that need to use the updated value. +countUpTo :: (TensorType t, OneOf '[Int32, Int64] t) => Int64 -> Tensor v1 t -> Tensor Value t + +-- | Computes the absolute value of a tensor. +-- +-- Given a tensor x, this operation returns a tensor containing +-- the absolute value of each element in x. For example, if x is +-- an input element and y is an output element, this operation computes +-- \(y = |x|\). +abs :: (TensorType t, OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Computes numerical negative value element-wise. +-- +-- I.e., \(y = -x\). +neg :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | Returns the element-wise max of two SparseTensors. +-- +-- Assumes the two SparseTensors have the same shape, i.e., no +-- broadcasting. +sparseSparseMaximum :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int64 -> Tensor v5 t -> Tensor v6 Int64 -> (Tensor Value Int64, Tensor Value t) + +-- | Computes the gradient for the inverse of x wrt its input. +-- +-- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is +-- the corresponding input gradient. +invGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes square root of x element-wise. +-- +-- I.e., \(y = sqrt{x} = x^{1/2}\). 
+sqrt :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).
+--
+-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the inverse for all input submatrices `[..., :, :]`.
+--
+-- The op uses LU decomposition with partial pivoting to compute the inverses.
+--
+-- If a matrix is not invertible there is no guarantee what the op does. It may detect the condition and raise an exception or it may simply return a garbage result.
+matrixInverse :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Computes the gradient for the sqrt of x wrt its input.
+--
+-- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and dy is the corresponding input gradient.
+sqrtGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | Inserts a dimension of 1 into a tensor's shape.
+--
+-- Given a tensor input, this operation inserts a dimension of 1 at the dimension index dim of input's shape. The dimension index dim starts at zero; if you specify a negative number for dim it is counted backward from the end.
+--
+-- This operation is useful if you want to add a batch dimension to a single element. For example, if you have a single image of shape `[height, width, channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, which will make the shape `[1, height, width, channels]`.
+--
+-- Other examples:
+--
+-- ```prettyprint
+-- # 't' is a tensor of shape [2]
+-- shape(expand_dims(t, 0)) ==> [1, 2]
+-- shape(expand_dims(t, 1)) ==> [2, 1]
+-- shape(expand_dims(t, -1)) ==> [2, 1]
+--
+-- # 't2' is a tensor of shape [2, 3, 5]
+-- shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
+-- shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
+-- shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
+-- ```
+--
+-- This operation requires that:
+--
+-- `-1-input.dims() <= dim <= input.dims()`
+--
+-- This operation is related to `squeeze()`, which removes dimensions of size 1.
+expandDims :: (TensorType t, TensorType tdim, OneOf '[Int32, Int64] tdim) => Tensor v1 t -> Tensor v2 tdim -> Tensor Value t
+
+-- | Computes the "logical and" of elements across dimensions of a tensor.
+--
+-- Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.
+all :: (TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 Bool -> Tensor v2 tidx -> Tensor Value Bool
+
+-- | Performs beam search decoding on the logits given in input.
+--
+-- A note about the attribute merge_repeated: For the beam search decoder, this means that if consecutive entries in a beam are the same, only the first of these is emitted. That is, when the top path is "A B B B B", "A B" is returned if merge_repeated = True but "A B B B B" is returned if merge_repeated = False.
+cTCBeamSearchDecoder :: Int64 -> Int64 -> Tensor v1 Float -> Tensor v2 Int32 -> ([Tensor Value Int64], [Tensor Value Int64], [Tensor Value Int64], Tensor Value Float)
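The shape arithmetic of the expandDims examples above fits in a few lines of plain Haskell (expandDimsShape is an illustrative helper, not part of these bindings):

```haskell
-- Insert a 1 at index dim of a shape; a negative dim counts backward
-- from the end, as documented for expandDims above.
expandDimsShape :: Int -> [Int] -> [Int]
expandDimsShape dim shape =
  let d = if dim < 0 then dim + length shape + 1 else dim
  in take d shape ++ [1] ++ drop d shape
```

For instance, `expandDimsShape (-1) [2]` gives `[2, 1]` and `expandDimsShape 2 [2, 3, 5]` gives `[2, 3, 1, 5]`, matching the examples above.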
+-- +-- I.e., \(y = 1 / sqrt{x}\).
+rsqrt :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Computes the gradient for the tanh of x wrt its input.
+--
+-- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and
+-- dy is the corresponding input gradient.
+tanhGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | Computes sin of x element-wise.
+sin :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Computes the determinant of one or more square matrices.
+--
+-- The input is a tensor of shape `[..., M, M]` whose inner-most 2
+-- dimensions form square matrices. The output is a tensor containing the
+-- determinants for all input submatrices `[..., :, :]`.
+matrixDeterminant :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Computes cos of x element-wise.
+cos :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | BatchToSpace for 4-D tensors of type T.
+--
+-- This is a legacy version of the more general BatchToSpaceND.
+--
+-- Rearranges (permutes) data from batch into blocks of spatial data,
+-- followed by cropping. This is the reverse transformation of
+-- SpaceToBatch. More specifically, this op outputs a copy of the input
+-- tensor where values from the batch dimension are moved in
+-- spatial blocks to the height and width dimensions,
+-- followed by cropping along the height and width
+-- dimensions.
+batchToSpace :: (TensorType t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Int64 -> Tensor v1 t -> Tensor v2 tidx -> Tensor Value t
+
+-- | Converts a sparse representation into a dense tensor.
+--
+-- Builds an array dense with shape output_shape such
+-- that
+--
+-- ```prettyprint # If sparse_indices is scalar dense[i] = (i ==
+-- sparse_indices ? sparse_values : default_value)
+--
+-- # If sparse_indices is a vector, then for each i
+-- dense[sparse_indices[i]] = sparse_values[i]
+--
+-- # If sparse_indices is an n by d matrix, then for each i in [0, n)
+-- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] =
+-- sparse_values[i] ```
+--
+-- All other values in dense are set to default_value.
+-- If sparse_values is a scalar, all sparse indices are set to
+-- this single value.
+--
+-- Indices should be sorted in lexicographic order, and indices must not
+-- contain any repeats. If validate_indices is true, these
+-- properties are checked during execution.
+sparseToDense :: (TensorType t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 tindices -> Tensor v2 tindices -> Tensor v3 t -> Tensor v4 t -> Tensor Value t
+
+-- | Computes asin of x element-wise.
+asin :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Returns the index with the smallest value across dimensions of a
+-- tensor.
+argMin :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value Int64
+
+-- | Returns which elements of x are Inf.
+isInf :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value Bool
+
+-- | Returns an element-wise indication of the sign of a number.
+--
+-- `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
+--
+-- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y
+-- = 0`.
+sign :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Returns x + y element-wise.
      +--
+-- *NOTE*: Add supports broadcasting. AddN does not. More about broadcasting here
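+--
+-- As an illustration, a minimal sketch (assuming constant from the
+-- companion tensorflow-ops package and runSession/run from the
+-- tensorflow package; neither is part of this module):
+--
+-- > -- constant, runSession and run are assumed imports
+-- > xs <- runSession $ run $
+-- >     add (constant (Shape [2]) [1, 2 :: Float])
+-- >         (constant (Shape [2]) [10, 20])
+-- > -- xs is the vector [11.0, 21.0]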
    +add :: (TensorType t, OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Update relevant entries in '*var' according to the Ftrl-proximal +-- scheme. +-- +-- That is for rows we have grad for, we update var, accum and linear as +-- follows: accum_new = accum + grad * grad linear += grad + +-- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 +-- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - +-- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new +sparseApplyFtrl :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 tindices -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor v9 t -> Tensor Value t + +-- | Returns x - y element-wise. +-- +--
      +--
+-- *NOTE*: Sub supports broadcasting. More about broadcasting here
    +sub :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t +batchFFT3D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) + +-- | Computes the sum of elements across dimensions of a SparseTensor. +-- +-- This Op takes a SparseTensor and is the sparse counterpart to +-- `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a +-- SparseTensor. +-- +-- Reduces sp_input along the dimensions given in +-- reduction_axes. Unless keep_dims is true, the rank +-- of the tensor is reduced by 1 for each entry in +-- reduction_axes. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +-- +-- If reduction_axes has no entries, all dimensions are reduced, +-- and a tensor with a single element is returned. Additionally, the axes +-- can be negative, which are interpreted according to the indexing rules +-- in Python. +sparseReduceSumSparse :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int32 -> (Tensor Value Int64, Tensor Value t, Tensor Value Int64) + +-- | Adds bias to value. +-- +-- This is a special case of `tf.add` where bias is restricted +-- to be 1-D. Broadcasting is supported, so value may have any +-- number of dimensions. +biasAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Returns x * y element-wise. +-- +--
      +--
+-- *NOTE*: Mul supports broadcasting. More about broadcasting here
    +mul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Returns x / y element-wise. +-- +--
      +--
+-- *NOTE*: Div supports broadcasting. More about broadcasting here
    +div :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Forwards the input to the output. +-- +-- This operator represents the loop termination condition used by the +-- "pivot" switches of a loop. +loopCond :: Tensor v1 Bool -> Tensor Value Bool + +-- | Returns (x - y)(x - y) element-wise. +-- +--
      +--
+-- *NOTE*: SquaredDifference supports broadcasting. More about broadcasting here
    +squaredDifference :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Returns the max of x and y (i.e. x > y ? x : y) element-wise. +-- +--
      +--
+-- *NOTE*: Maximum supports broadcasting. More about broadcasting here
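+--
+-- For instance, a sketch (constant is assumed from the companion
+-- tensorflow-ops package):
+--
+-- > maximum (constant (Shape [3]) [1, 5, 3 :: Float])
+-- >         (constant (Shape [3]) [4, 2, 6])
+-- > -- evaluates to [4.0, 5.0, 6.0] when run in a session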
    +maximum :: (TensorType t, OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Generates labels for candidate sampling with a log-uniform +-- distribution. +-- +-- See explanations of candidate sampling and the data formats at +-- go/candidate-sampling. +-- +-- For each batch, this op picks a single set of sampled candidate +-- labels. +-- +-- The advantages of sampling candidates per-batch are simplicity and the +-- possibility of efficient dense matrix multiplication. The disadvantage +-- is that the sampled candidates must be chosen independently of the +-- context and of the true labels. +logUniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) + +-- | Returns the truth value of (x < y) element-wise. +-- +--
      +--
+-- *NOTE*: Less supports broadcasting. More about broadcasting here
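+--
+-- For instance, a sketch (constant is assumed from the companion
+-- tensorflow-ops package):
+--
+-- > less (constant (Shape [3]) [1, 7, 3 :: Float])
+-- >      (constant (Shape [3]) [4, 2, 6])
+-- > -- evaluates to [True, False, True] when run in a session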
    +less :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool + +-- | Computes the power of one value to another. +-- +-- Given a tensor x and a tensor y, this operation +-- computes \(x^y\) for corresponding elements in x and +-- y. For example: +-- +-- ``` # tensor x is [[2, 2]], [3, 3]] # tensor y is +-- [[8, 16], [2, 3]] tf.pow(x, y) ==> [[256, 65536], [9, 27]] ``` +pow :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Compute the upper regularized incomplete Gamma function `Q(a, x)`. +-- +-- The upper regularized incomplete Gamma function is defined as: +-- +-- ``` Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x) ``` where ``` +-- Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt ``` is the upper +-- incomplete Gama function. +-- +-- Note, above `P(a, x)` (Igamma) is the lower regularized +-- complete Gamma function. +igammac :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Compute the lower regularized incomplete Gamma function `Q(a, x)`. +-- +-- The lower regularized incomplete Gamma function is defined as: +-- +-- ``` P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x) ``` where ``` +-- gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt ``` is the lower +-- incomplete Gamma function. +-- +-- Note, above `Q(a, x)` (Igammac) is the upper regularized +-- complete Gamma function. +igamma :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Compute the Hurwitz zeta function \(zeta(x, q)\). +-- +-- The Hurwitz zeta function is defined as: +-- +-- ``` zeta(x, q) = sum_{n=0}^{infty} (q + n)^{-x} ``` +zeta :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Returns the imaginary part of a complex number. +-- +-- Given a tensor input of complex numbers, this operation +-- returns a tensor of type float that is the imaginary part of +-- each element in input. All elements in input must be +-- complex numbers of the form \(a + bj\), where *a* is the real part and +-- *b* is the imaginary part returned by this operation. +-- +-- For example: +-- +-- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] +-- tf.imag(input) ==> [4.75, 5.75] ``` +imag :: (TensorType t, OneOf '[Complex Double, Complex Float] t, TensorType tout, OneOf '[Double, Float] tout) => Tensor v1 t -> Tensor Value tout + +-- | Converts two real numbers to a complex number. +-- +-- Given a tensor real representing the real part of a complex +-- number, and a tensor imag representing the imaginary part of a +-- complex number, this operation returns complex numbers elementwise of +-- the form \(a + bj\), where *a* represents the real part and *b* +-- represents the imag part. +-- +-- The input tensors real and imag must have the same +-- shape. +-- +-- For example: +-- +-- ``` # tensor real is [2.25, 3.25] # tensor imag is +-- [4.75, 5.75] tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + +-- 5.75j]] ``` +complex :: (TensorType t, OneOf '[Double, Float] t, TensorType tout, OneOf '[Complex Double, Complex Float] tout) => Tensor v1 t -> Tensor v2 t -> Tensor Value tout + +-- | Returns the truth value of (x != y) element-wise. +-- +--
      +--
+-- *NOTE*: NotEqual supports broadcasting. More about broadcasting here
    +notEqual :: (TensorType t, OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool + +-- | Computes the complex absolute value of a tensor. +-- +-- Given a tensor x of complex numbers, this operation returns a +-- tensor of type float or double that is the absolute +-- value of each element in x. All elements in x must +-- be complex numbers of the form \(a + bj\). The absolute value is +-- computed as \( sqrt{a^2 + b^2}\). +-- +-- For example: +-- +-- ``` # tensor x is [[-2.25 + 4.75j], [-3.25 + 5.75j]] +-- tf.complex_abs(x) ==> [5.25594902, 6.60492229] ``` +complexAbs :: (TensorType t, OneOf '[Complex Double, Complex Float] t, TensorType tout, OneOf '[Double, Float] tout) => Tensor v1 t -> Tensor Value tout + +-- | Returns the truth value of x AND y element-wise. +-- +--
      +--
+-- *NOTE*: LogicalAnd supports broadcasting. More about broadcasting here
    +logicalAnd :: Tensor v1 Bool -> Tensor v2 Bool -> Tensor Value Bool +batchFFT :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) + +-- | Selects elements from t or e, depending on +-- condition. +-- +-- The t, and e tensors must all have the same shape, +-- and the output will also have that shape. The condition +-- tensor must be a scalar if t and e are scalars. If +-- t and e are vectors or higher rank, then +-- condition must be either a vector with size matching the +-- first dimension of t, or must have the same shape as +-- t. +-- +-- The condition tensor acts as a mask that chooses, based on +-- the value at each element, whether the corresponding element / row in +-- the output should be taken from t (if true) or e (if +-- false). +-- +-- If condition is a vector and t and e are +-- higher rank matrices, then it chooses which row (outer dimension) to +-- copy from t and e. If condition has the +-- same shape as t and e, then it chooses which element +-- to copy from t and e. +-- +-- For example: +-- +-- ```prettyprint # condition tensor is [[True, False] # [False, +-- True]] # t is [[1, 2], # [3, 4]] # e is [[5, 6], # +-- [7, 8]] select(condition, t, e) ==> [[1, 6], [7, 4]] +-- +-- # condition tensor is [True, False] # t is [[1, 2], +-- # [3, 4]] # e is [[5, 6], # [7, 8]] select(condition, t, e) +-- ==> [[1, 2], [7, 8]] +-- +-- ``` +select :: (TensorType t) => Tensor v1 Bool -> Tensor v2 t -> Tensor v3 t -> Tensor Value t + +-- | Multiply the matrix "a" by the matrix "b". +-- +-- The inputs must be two-dimensional matrices and the inner dimension of +-- "a" (after being transposed if transpose_a is true) must match the +-- outer dimension of "b" (after being transposed if transposed_b is +-- true). +-- +--
      +--
+-- *Note*: The default kernel implementation for MatMul on GPUs uses cublas.
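+--
+-- A worked sketch (constant is assumed from the companion
+-- tensorflow-ops package): multiplying the 2x2 matrix [[1,2],[3,4]]
+-- by [[5,6],[7,8]] yields [[19,22],[43,50]], since e.g. 1*5 + 2*7 = 19.
+--
+-- > matMul (constant (Shape [2, 2]) [1, 2, 3, 4 :: Float])
+-- >        (constant (Shape [2, 2]) [5, 6, 7, 8])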
+matMul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
+
+-- | Computes Psi, the derivative of Lgamma (the log of the absolute value
+-- of `Gamma(x)`), element-wise.
+digamma :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Computes the gradients of convolution with respect to the filter.
+conv2DBackpropFilter :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Value t
+
+-- | Computes the minimum of elements across dimensions of a tensor.
+--
+-- Reduces input along the dimensions given in
+-- reduction_indices. Unless keep_dims is true, the
+-- rank of the tensor is reduced by 1 for each entry in
+-- reduction_indices. If keep_dims is true, the reduced
+-- dimensions are retained with length 1.
+min :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t
+
+-- | Returns which elements of x are finite.
+isFinite :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value Bool
+
+-- | Returns the index with the largest value across dimensions of a
+-- tensor.
+argMax :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value Int64
+
+-- | Computes the mean along segments of a tensor.
+--
+-- Read the section on Segmentation for an explanation of
+-- segments.
+--
+-- Computes a tensor such that \(output_i = frac{sum_j data_j}{N}\) where
+-- mean is over j such that `segment_ids[j] == i` and
+-- N is the total number of values summed.
+--
+-- (figure: ../../images/SegmentMean.png)
+segmentMean :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor Value t
+
+-- | Compute the cumulative product of the tensor x along
+-- axis.
+--
+-- By default, this op performs an inclusive cumprod, which means that
+-- the first element of the input is identical to the first element of
+-- the output: ```prettyprint tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c] ```
+--
+-- By setting the exclusive kwarg to True, an exclusive
+-- cumprod is performed instead: ```prettyprint tf.cumprod([a, b, c],
+-- exclusive=True) ==> [0, a, a * b] ```
+--
+-- By setting the reverse kwarg to True, the cumprod is
+-- performed in the opposite direction: ```prettyprint tf.cumprod([a, b,
+-- c], reverse=True) ==> [a * b * c, b * c, c] ``` This is more
+-- efficient than using separate `tf.reverse` ops.
+--
+-- The reverse and exclusive kwargs can also be combined:
+-- ```prettyprint tf.cumprod([a, b, c], exclusive=True, reverse=True)
+-- ==> [b * c, c, 0] ```
+cumprod :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t
+
+-- | Computes the minimum along segments of a tensor.
+--
+-- Read the section on Segmentation for an explanation of
+-- segments.
+--
+-- Computes a tensor such that \(output_i = min_j(data_j)\) where
+-- min is over j such that `segment_ids[j] == i`.
+--
+-- (figure: ../../images/SegmentMin.png)
+segmentMin :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor Value t
+
+-- | Computes the sum along segments of a tensor.
+--
+-- Read the section on Segmentation for an explanation of
+-- segments.
+--
+-- Computes a tensor such that `output[i] = sum_{j...} data[j...]` where
+-- the sum is over tuples `j...` such that `segment_ids[j...] == i`.
+-- Unlike SegmentSum, segment_ids need not be sorted
+-- and need not cover all values in the full range of valid values.
+--
+-- If the sum is empty for a given segment ID i, `output[i] = 0`.
+--
+-- num_segments should equal the number of distinct segment IDs.
+--
+-- (figure: ../../images/UnsortedSegmentSum.png)
+unsortedSegmentSum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor v3 Int32 -> Tensor Value t
+
+-- | A Reader that outputs the records from a TensorFlow Records file.
+tFRecordReader :: Tensor Value ByteString
+
+-- | Computes the sum along sparse segments of a tensor.
+--
+-- Read the section on Segmentation for an explanation of
+-- segments.
+--
+-- Like SegmentSum, but segment_ids can have rank less
+-- than `data`'s first dimension, selecting a subset of dimension 0,
+-- specified by indices.
+--
+-- For example:
+--
+-- ```prettyprint c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+--
+-- # Select two rows, one segment. tf.sparse_segment_sum(c,
+-- tf.constant([0, 1]), tf.constant([0, 0])) ==> [[0 0 0 0]]
+--
+-- # Select two rows, two segments. tf.sparse_segment_sum(c,
+-- tf.constant([0, 1]), tf.constant([0, 1])) ==> [[ 1 2 3 4] [-1 -2 -3 -4]]
+--
+-- # Select all rows, two segments. tf.sparse_segment_sum(c,
+-- tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) ==> [[0 0 0 0] [5 6 7 8]]
+--
+-- # Which is equivalent to: tf.segment_sum(c, tf.constant([0, 0, 1])) ```
+sparseSegmentSum :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor v3 Int32 -> Tensor Value t
+
+-- | Computes the sum along sparse segments of a tensor divided by the sqrt
+-- of N.
+--
+-- N is the size of the segment being reduced.
+--
+-- Read the section on Segmentation for an explanation of
+-- segments.
+sparseSegmentSqrtN :: (TensorType t, OneOf '[Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor v3 Int32 -> Tensor Value t
+
+-- | Copy Host Op.
+--
+-- Performs CPU-to-CPU deep-copying of tensor.
+--
+-- Unlike the Copy Op, this op has HostMemory constraint on its input or
+-- output.
+copyHost :: (TensorType t) => Tensor v1 t -> Tensor Value t
+
+-- | Holds state in the form of a tensor that persists across steps.
+--
+-- Outputs a ref to the tensor state so it may be read or modified.
+-- TODO(zhifengc/mrry): Adds a pointer to a more detailed document about
+-- sharing states in tensorflow.
+variable :: (TensorType dtype) => Tensor Value dtype
+
+-- | Computes gradients for SparseSegmentSqrtN.
+--
+-- Returns tensor "output" with same shape as grad, except for dimension
+-- 0 whose value is output_dim0.
+sparseSegmentSqrtNGrad :: (TensorType t, OneOf '[Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor v3 Int32 -> Tensor v4 Int32 -> Tensor Value t
+
+-- | Creates a sequence of integers.
+--
+-- This operation creates a sequence of integers that begins at
+-- start and extends by increments of delta up to but
+-- not including limit.
+--
+-- For example:
+--
+-- ``` # start is 3 # limit is 18 # delta is 3
+-- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] ```
+range :: (TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 tidx -> Tensor v2 tidx -> Tensor v3 tidx -> Tensor Value tidx
+
+-- | Computes the "logical or" of elements across dimensions of a tensor.
+--
+-- Reduces input along the dimensions given in
+-- reduction_indices. Unless keep_dims is true, the
+-- rank of the tensor is reduced by 1 for each entry in
+-- reduction_indices. If keep_dims is true, the reduced
+-- dimensions are retained with length 1.
+any :: (TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 Bool -> Tensor v2 tidx -> Tensor Value Bool
+
+-- | Generates values in an interval.
+--
+-- A sequence of num evenly-spaced values is generated
+-- beginning at start. If `num > 1`, the values in the
+-- sequence increase by `(stop - start) / (num - 1)`, so that the last one
+-- is exactly stop.
+--
+-- For example:
+--
+-- ``` tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0
+-- 12.0] ```
+linSpace :: (TensorType t, OneOf '[Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 t -> Tensor v3 tidx -> Tensor Value t
+
+-- | Resize images to size using area interpolation.
+--
+-- Input images can be of different types but output images are always
+-- float.
+resizeArea :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value Float
+
+-- | Returns the real part of a complex number.
+--
+-- Given a tensor input of complex numbers, this operation
+-- returns a tensor of type float that is the real part of each
+-- element in input. All elements in input must be
+-- complex numbers of the form \(a + bj\), where *a* is the real part
+-- returned by this operation and *b* is the imaginary part.
+--
+-- For example:
+--
+-- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j]
+-- tf.real(input) ==> [-2.25, 3.25] ```
+real :: (TensorType t, OneOf '[Complex Double, Complex Float] t, TensorType tout, OneOf '[Double, Float] tout) => Tensor v1 t -> Tensor Value tout
+
+-- | Compute the inverse 1-dimensional discrete Fourier Transform over the
+-- inner-most dimension of input.
+iFFT :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float)
+
+-- | Compute the inverse 3-dimensional discrete Fourier Transform over the
+-- inner-most 3 dimensions of input.
+iFFT3D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float)
+
+-- | Compute the pairwise cross product.
+--
+-- a and b must be the same shape; they can either be
+-- simple 3-element vectors, or any shape where the innermost dimension
+-- is 3. In the latter case, each pair of corresponding 3-element vectors
+-- is cross-multiplied independently.
+cross :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Compute the cumulative sum of the tensor x along +-- axis. +-- +-- By default, this op performs an inclusive cumsum, which means that the +-- first element of the input is identical to the first element of the +-- output: ```prettyprint tf.cumsum([a, b, c]) ==> [a, a + b, a + b + +-- c] ``` +-- +-- By setting the exclusive kwarg to True, an exclusive +-- cumsum is performed instead: ```prettyprint tf.cumsum([a, b, c], +-- exclusive=True) ==> [0, a, a + b] ``` +-- +-- By setting the reverse kwarg to True, the cumsum is +-- performed in the opposite direction: ```prettyprint tf.cumsum([a, b, +-- c], reverse=True) ==> [a + b + c, b + c, c] ``` This is more +-- efficient than using separate `tf.reverse` ops. +-- +-- The reverse and exclusive kwargs can also be combined: +-- ```prettyprint tf.cumsum([a, b, c], exclusive=True, reverse=True) +-- ==> [b + c, c, 0] ``` +cumsum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t +batchIFFT :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) + +-- | Computes the Gauss error function of x element-wise. +erf :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t + +-- | For each key, assigns the respective value to the specified component. +-- +-- If a key is not found in the barrier, this operation will create a new +-- incomplete element. If a key is found in the barrier, and the element +-- already has a value at component_index, this operation will fail with +-- INVALID_ARGUMENT, and leave the barrier in an undefined state. +barrierInsertMany :: (TensorType t) => Int64 -> Tensor v1 ByteString -> Tensor v2 ByteString -> Tensor v3 t -> ControlNode + +-- | Returns element-wise largest integer not greater than x. +floor :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t +batchFFT2D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) + +-- | The gradient operator for the SparseAdd op. +-- +-- The SparseAdd op calculates A + B, where A, B, and the sum are all +-- represented as SparseTensor objects. This op takes in the +-- upstream gradient w.r.t. non-empty values of the sum, and outputs the +-- gradients w.r.t. the non-empty values of A and B. +sparseAddGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int64 -> Tensor v3 Int64 -> Tensor v4 Int64 -> (Tensor Value t, Tensor Value t) + +-- | Adds two SparseTensor objects to produce another +-- SparseTensor. +-- +-- The input SparseTensor objects' indices are assumed ordered +-- in standard lexicographic order. If this is not the case, before this +-- step run SparseReorder to restore index ordering. +-- +-- By default, if two values sum to zero at some index, the output +-- SparseTensor would still include that particular location in +-- its index, storing a zero in the corresponding value slot. To override +-- this, callers can specify thresh, indicating that if the sum +-- has a magnitude strictly smaller than thresh, its +-- corresponding value and index would then not be included. In +-- particular, `thresh == 0` (default) means everything is kept and +-- actual thresholding happens only for a positive value. 
+--
+-- In the following shapes, nnz is the count after taking
+-- thresh into account.
+sparseAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType treal, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int64 -> Tensor v5 t -> Tensor v6 Int64 -> Tensor v7 treal -> (Tensor Value Int64, Tensor Value t, Tensor Value Int64)
+batchCholesky :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
+
+-- | Partitions `data` into num_partitions tensors using indices
+-- from partitions.
+--
+-- For each index tuple js of size `partitions.ndim`, the slice
+-- `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices
+-- with `partitions[js] = i` are placed in `outputs[i]` in lexicographic
+-- order of js, and the first dimension of `outputs[i]` is the
+-- number of entries in partitions equal to i. In
+-- detail,
+--
+-- outputs[i].shape = [sum(partitions == i)] +
+-- data.shape[partitions.ndim:]
+--
+-- outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
+--
+-- `data.shape` must start with `partitions.shape`.
+--
+-- For example:
+--
+-- # Scalar partitions partitions = 1 num_partitions = 2 data = [10, 20]
+-- outputs[0] = [] # Empty with shape [0, 2] outputs[1] = [[10, 20]]
+--
+-- # Vector partitions partitions = [0, 0, 1, 1, 0] num_partitions = 2
+-- data = [10, 20, 30, 40, 50] outputs[0] = [10, 20, 50] outputs[1] =
+-- [30, 40]
+--
+-- (figure: ../../images/DynamicPartition.png)
+dynamicPartition :: (TensorType t) => Int64 -> Tensor v1 t -> Tensor v2 Int32 -> [Tensor Value t]
+
+-- | Serialize a SparseTensor into a string 3-vector (1-D
+-- Tensor) object.
+serializeSparse :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor Value ByteString
+
+-- | Concatenates a list of SparseTensor along the specified
+-- dimension.
+--
+-- Concatenation is with respect to the dense versions of these sparse
+-- tensors. It is assumed that each input is a SparseTensor
+-- whose elements are ordered along increasing dimension number.
+--
+-- All inputs' shapes must match, except for the concat dimension. The
+-- indices, values, and shapes lists must have
+-- the same length.
+--
+-- The output shape is identical to the inputs', except along the concat
+-- dimension, where it is the sum of the inputs' sizes along that
+-- dimension.
+--
+-- The output elements will be resorted to preserve the sort order along
+-- increasing dimension number.
+--
+-- This op runs in `O(M log M)` time, where M is the total
+-- number of non-empty values across all inputs. This is due to the need
+-- for an internal sort in order to concatenate efficiently across an
+-- arbitrary dimension.
+--
+-- For example, if `concat_dim = 1` and the inputs are
+--
+-- sp_inputs[0]: shape = [2, 3] [0, 2]: "a" [1, 0]: "b" [1, 1]: "c"
+--
+-- sp_inputs[1]: shape = [2, 4] [0, 1]: "d" [0, 2]: "e"
+--
+-- then the output will be
+--
+-- shape = [2, 7] [0, 2]: "a" [0, 4]: "d" [0, 5]: "e" [1, 0]: "b" [1, 1]:
+-- "c"
+--
+-- Graphically this is equivalent to doing
      +--
+--     [    a] concat [  d e  ] = [    a   d e  ]
+--     [b c  ]        [       ]   [b c          ]
+sparseConcat :: (TensorType t) => Int64 -> [Tensor v1 Int64] -> [Tensor v2 t] -> [Tensor v3 Int64] -> (Tensor Value Int64, Tensor Value t, Tensor Value Int64)
+
+-- | Computes the product along segments of a tensor.
+--
+-- Read the section on Segmentation for an explanation of
+-- segments.
+--
+-- Computes a tensor such that \(output_i = prod_j data_j\) where the
+-- product is over j such that `segment_ids[j] == i`.
+--
+-- (figure: ../../images/SegmentProd.png)
+segmentProd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor Value t
+
+-- | Reshapes a SparseTensor to represent values in a new dense shape.
+--
+-- This operation has the same semantics as reshape on the represented
+-- dense tensor. The input_indices are recomputed based on the
+-- requested new_shape.
+--
+-- If one component of new_shape is the special value -1, the
+-- size of that dimension is computed so that the total dense size
+-- remains constant. At most one component of new_shape can be
+-- -1. The number of dense elements implied by new_shape must be
+-- the same as the number of dense elements originally implied by
+-- input_shape.
+--
+-- Reshaping does not affect the order of values in the SparseTensor.
+--
+-- If the input tensor has rank R_in and N non-empty
+-- values, and new_shape has length R_out, then
+-- input_indices has shape `[N, R_in]`, input_shape has
+-- length R_in, output_indices has shape `[N, R_out]`,
+-- and output_shape has length R_out.
+sparseReshape :: Tensor v1 Int64 -> Tensor v2 Int64 -> Tensor v3 Int64 -> (Tensor Value Int64, Tensor Value Int64)
+
+-- | Component-wise multiplies a SparseTensor by a dense Tensor.
+--
+-- The output locations corresponding to the implicitly zero elements in
+-- the sparse tensor will be zero (i.e., will not take up storage space),
+-- regardless of the contents of the dense tensor (even if it's +/-INF,
+-- and even though INF*0 == NaN).
      +--
+-- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.
    +sparseDenseCwiseMul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 t -> Tensor Value t + +-- | Component-wise divides a SparseTensor by a dense Tensor. +-- +--
      +--
+-- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.
    +sparseDenseCwiseDiv :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 t -> Tensor Value t diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html new file mode 100644 index 0000000..8788b6e --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html @@ -0,0 +1,4 @@ +TensorFlow.Examples.MNIST.Parse

    tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Examples.MNIST.Parse

    Documentation

    type MNIST = Vector Word8 Source

    Utilities specific to MNIST.

    drawMNIST :: MNIST -> Text Source

    Produces a unicode rendering of the MNIST digit sample.

    checkEndian :: Get () Source

Checks the file's endianness, throwing an error if it's not as expected.

    readMNISTSamples :: FilePath -> IO [MNIST] Source

    Reads an MNIST file and returns a list of samples.

    readMNISTLabels :: FilePath -> IO [Word8] Source

    Reads a list of MNIST labels from a file and returns them.
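
A minimal end-to-end sketch of this module (the gzipped IDX file names
below are assumptions; the files from the standard MNIST distribution
work):

    import qualified Data.Text.IO as Text

    main :: IO ()
    main = do
      images <- readMNISTSamples "train-images-idx3-ubyte.gz"
      labels <- readMNISTLabels "train-labels-idx1-ubyte.gz"
      Text.putStr (drawMNIST (head images))  -- render the first sample
      print (head labels)                    -- and print its label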

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html new file mode 100644 index 0000000..630e30a --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html @@ -0,0 +1,4 @@ +TensorFlow.Examples.MNIST.TrainedGraph

    tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

Safe Haskell: Safe
Language: Haskell2010

    TensorFlow.Examples.MNIST.TrainedGraph

    Description

    Paths to test helper files.

    Documentation

    mnistPb :: IO FilePath Source

File containing a TensorFlow serialized proto of MNIST.

    wtsCkpt :: IO ByteString Source

    Files containing pre-trained weights for MNIST.

    biasCkpt :: IO ByteString Source

    Files containing pre-trained weights for MNIST.
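
These are IO actions because the underlying files are resolved with
getDataFileName at runtime. A small usage sketch (it only prints the
resolved locations; feeding them to a session is left to the tensorflow
package):

    main :: IO ()
    main = do
      graphPath <- mnistPb     -- e.g. <datadir>/data/MNIST.pb
      putStrLn graphPath
      wts  <- wtsCkpt          -- checkpoint paths, as ByteStrings
      bias <- biasCkpt
      print (wts, bias)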

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-mnist-0.1.0.0/doc-index.html new file mode 100644 index 0000000..5f705d0 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/doc-index.html @@ -0,0 +1,4 @@ +tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model. (Index)

    tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/frames.html b/docs/haddock/tensorflow-mnist-0.1.0.0/frames.html new file mode 100644 index 0000000..1b4e38d --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/frames.html @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-mnist-0.1.0.0/haddock-util.js new file mode 100644 index 0000000..9a6fccf --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/haddock-util.js @@ -0,0 +1,344 @@ +// Haddock JavaScript utilities + +var rspace = /\s\s+/g, + rtrim = /^\s+|\s+$/g; + +function spaced(s) { return (" " + s + " ").replace(rspace, " "); } +function trim(s) { return s.replace(rtrim, ""); } + +function hasClass(elem, value) { + var className = spaced(elem.className || ""); + return className.indexOf( " " + value + " " ) >= 0; +} + +function addClass(elem, value) { + var className = spaced(elem.className || ""); + if ( className.indexOf( " " + value + " " ) < 0 ) { + elem.className = trim(className + " " + value); + } +} + +function removeClass(elem, value) { + var className = spaced(elem.className || ""); + className = className.replace(" " + value + " ", " "); + elem.className = trim(className); +} + +function toggleClass(elem, valueOn, valueOff, bool) { + if (bool == null) { bool = ! hasClass(elem, valueOn); } + if (bool) { + removeClass(elem, valueOff); + addClass(elem, valueOn); + } + else { + removeClass(elem, valueOn); + addClass(elem, valueOff); + } + return bool; +} + + +function makeClassToggle(valueOn, valueOff) +{ + return function(elem, bool) { + return toggleClass(elem, valueOn, valueOff, bool); + } +} + +toggleShow = makeClassToggle("show", "hide"); +toggleCollapser = makeClassToggle("collapser", "expander"); + +function toggleSection(id) +{ + var b = toggleShow(document.getElementById("section." + id)); + toggleCollapser(document.getElementById("control." + id), b); + rememberCollapsed(id, b); + return b; +} + +var collapsed = {}; +function rememberCollapsed(id, b) +{ + if(b) + delete collapsed[id] + else + collapsed[id] = null; + + var sections = []; + for(var i in collapsed) + { + if(collapsed.hasOwnProperty(i)) + sections.push(i); + } + // cookie specific to this page; don't use setCookie which sets path=/ + document.cookie = "collapsed=" + escape(sections.join('+')); +} + +function restoreCollapsed() +{ + var cookie = getCookie("collapsed"); + if(!cookie) + return; + + var ids = cookie.split('+'); + for(var i in ids) + { + if(document.getElementById("section." 
+ ids[i])) + toggleSection(ids[i]); + } +} + +function setCookie(name, value) { + document.cookie = name + "=" + escape(value) + ";path=/;"; +} + +function clearCookie(name) { + document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; +} + +function getCookie(name) { + var nameEQ = name + "="; + var ca = document.cookie.split(';'); + for(var i=0;i < ca.length;i++) { + var c = ca[i]; + while (c.charAt(0)==' ') c = c.substring(1,c.length); + if (c.indexOf(nameEQ) == 0) { + return unescape(c.substring(nameEQ.length,c.length)); + } + } + return null; +} + + + +var max_results = 75; // 50 is not enough to search for map in the base libraries +var shown_range = null; +var last_search = null; + +function quick_search() +{ + perform_search(false); +} + +function full_search() +{ + perform_search(true); +} + + +function perform_search(full) +{ + var text = document.getElementById("searchbox").value.toLowerCase(); + if (text == last_search && !full) return; + last_search = text; + + var table = document.getElementById("indexlist"); + var status = document.getElementById("searchmsg"); + var children = table.firstChild.childNodes; + + // first figure out the first node with the prefix + var first = bisect(-1); + var last = (first == -1 ? -1 : bisect(1)); + + if (first == -1) + { + table.className = ""; + status.innerHTML = "No results found, displaying all"; + } + else if (first == 0 && last == children.length - 1) + { + table.className = ""; + status.innerHTML = ""; + } + else if (last - first >= max_results && !full) + { + table.className = ""; + status.innerHTML = "More than " + max_results + ", press Search to display"; + } + else + { + // decide what you need to clear/show + if (shown_range) + setclass(shown_range[0], shown_range[1], "indexrow"); + setclass(first, last, "indexshow"); + shown_range = [first, last]; + table.className = "indexsearch"; + status.innerHTML = ""; + } + + + function setclass(first, last, status) + { + for (var i = first; i <= last; i++) + { + children[i].className = status; + } + } + + + // do a binary search, treating 0 as ... + // return either -1 (no 0's found) or location of most far match + function bisect(dir) + { + var first = 0, finish = children.length - 1; + var mid, success = false; + + while (finish - first > 3) + { + mid = Math.floor((finish + first) / 2); + + var i = checkitem(mid); + if (i == 0) i = dir; + if (i == -1) + finish = mid; + else + first = mid; + } + var a = (dir == 1 ? first : finish); + var b = (dir == 1 ? finish : first); + for (var i = b; i != a - dir; i -= dir) + { + if (checkitem(i) == 0) return i; + } + return -1; + } + + + // from an index, decide what the result is + // 0 = match, -1 is lower, 1 is higher + function checkitem(i) + { + var s = getitem(i).toLowerCase().substr(0, text.length); + if (s == text) return 0; + else return (s > text ? -1 : 1); + } + + + // from an index, get its string + // this abstracts over alternates + function getitem(i) + { + for ( ; i >= 0; i--) + { + var s = children[i].firstChild.firstChild.data; + if (s.indexOf(' ') == -1) + return s; + } + return ""; // should never be reached + } +} + +function setSynopsis(filename) { + if (parent.window.synopsis) { + if (parent.window.synopsis.location.replace) { + // In Firefox this avoids adding the change to the history. 
+ parent.window.synopsis.location.replace(filename); + } else { + parent.window.synopsis.location = filename; + } + } +} + +function addMenuItem(html) { + var menu = document.getElementById("page-menu"); + if (menu) { + var btn = menu.firstChild.cloneNode(false); + btn.innerHTML = html; + menu.appendChild(btn); + } +} + +function adjustForFrames() { + var bodyCls; + + if (parent.location.href == window.location.href) { + // not in frames, so add Frames button + addMenuItem("Frames"); + bodyCls = "no-frame"; + } + else { + bodyCls = "in-frame"; + } + addClass(document.body, bodyCls); +} + +function reframe() { + setCookie("haddock-reframe", document.URL); + window.location = "frames.html"; +} + +function postReframe() { + var s = getCookie("haddock-reframe"); + if (s) { + parent.window.main.location = s; + clearCookie("haddock-reframe"); + } +} + +function styles() { + var i, a, es = document.getElementsByTagName("link"), rs = []; + for (i = 0; a = es[i]; i++) { + if(a.rel.indexOf("style") != -1 && a.title) { + rs.push(a); + } + } + return rs; +} + +function addStyleMenu() { + var as = styles(); + var i, a, btns = ""; + for(i=0; a = as[i]; i++) { + btns += "
  • " + + a.title + "
  • " + } + if (as.length > 1) { + var h = "
    " + + "Style ▾" + + "
      " + btns + "
    " + + "
    "; + addMenuItem(h); + } +} + +function setActiveStyleSheet(title) { + var as = styles(); + var i, a, found; + for(i=0; a = as[i]; i++) { + a.disabled = true; + // need to do this always, some browsers are edge triggered + if(a.title == title) { + found = a; + } + } + if (found) { + found.disabled = false; + setCookie("haddock-style", title); + } + else { + as[0].disabled = false; + clearCookie("haddock-style"); + } + styleMenu(false); +} + +function resetStyle() { + var s = getCookie("haddock-style"); + if (s) setActiveStyleSheet(s); +} + + +function styleMenu(show) { + var m = document.getElementById('style-menu'); + if (m) toggleShow(m, show); +} + + +function pageLoad() { + addStyleMenu(); + adjustForFrames(); + resetStyle(); + restoreCollapsed(); +} + diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-mnist-0.1.0.0/hslogo-16.png new file mode 100644 index 0000000000000000000000000000000000000000..0ff8579fbd897417b0d6dad6e920f8882138a7c0 GIT binary patch literal 1684 zcmV;F25b3=P)4Tx0C)j~RL^S@K@|QrZmG~B2wH0nvUrdpNm;9CMbtL^5n^i$+aIn^?(HA4aZWV5ov6ELTdbo0FI&wK{O>*+w4vx20?>!`FrQsdJlnHR>OPy zcd~b_n$otK2Za4V;76L-DzNVtaSB-y0*E}{p()372;bw_^6ZZ}PI-92wGS&j#91PI zKs7DSe@(bk%_Y-7gGe}(^>I=@oY#w#*Bu9GZf3^F5WP>3rn}7Ut74&?PWBFvy`A)a zPP5)V!Xd&78LdA?xQ(9mjMYElVd13a#D+Z_7&Y|xU=_C-srWU*6kiZcC!$nw*)9$7 zn6CX+@=AhmkT}X@VSsa5NKe;HZuq)~1$`#h6R+ZTR#D-3j}vF!)ZOnz+5)dI4jl{{ z44Mr{P!L4~VVJN`K!!XTF*LGrKO?IK8z<8w`3e3jI8lUGNUta*C8 zn(P`s>{pjD=7Kek#B;Fw@hxAK%$F&Q6vg9J^Xf~4by_hu-=A!MJ3Znq&n~srbFGPs zH&&aMXZ>nO`|hf|ljc?VPhR!${AbO?W8x_>CU%PFA&Hm8F7cAsOREdwU~R_;ot1_u z(ruCYB-LPGn!NQdT|ZlRy+(fw^-+`=%+gee_kY4FWHg<*4sZI8+sFJD270UUORdLHO0nA4V) z%{fwsET5CQ>B?eK%uw4yQc~9?*JVo2}ze(;aRcp*ceL#HUJSllrgm5wQKR zQu+C;QrUh^8rFfA`ftFz{YAidi-`aL010qNS#tmY4c7nw4c7reD4Tcy00T@(L_t(I z5sj2vNEA^R$7gqDc6T=2^@fUA2(c`MltuL5<|KW>RWz$&YbU@|M|{$E*8Tu-Ux!w z1Y*Dr&Ubfr&v-nZaaB{3ilRumrjPmk{sZvQEWlW+{o~IH|8)=s6c#X9S5s5d%J z4@)&QH5|xQY-)^L1n0pTRu0Lx9`08YTjTwn^6 z0;b1+aQ@)n;Em$q;=7BBi)v0zj&o^g>0Whp^_^5IbxIUP8C@y9;R?*Ouu}rmfxbU= zwtWVNke-m!=`7bYEhWpcI5#)9qp`8E0lr6IQ)ARL3Ui}Af@grj8aN1=r>Cb+prlzO zNfJs*N_tUm2ZL%5* zPmL2??da$TR904gL(VDAQ-Fv_Dk}Pdw*4T(%*f4MKLRg=4ekMjhe2mW zMFsBwg%ftWT}0kxRaIk1k7qJ8*#cKB;Ft{i`zVIs-Nqge;!!Ld7#O&Qqu7e0sJmP) z$MW*>L$vSB&dxp@iA3U9fo)-7!Czlr{|o7Hv{1oyg3xsu%gn@(b1>$;SM-ZaQ`HV=V0s;lr%d8bd;xY zGwNvm3=Iu=tyXIgtJnf@A(2S@M140N ew{UA~tMxaJq;$xaSSi*30000tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model. \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/index.html b/docs/haddock/tensorflow-mnist-0.1.0.0/index.html new file mode 100644 index 0000000..3ccb460 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/index.html @@ -0,0 +1,4 @@ +tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

    tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

    tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

    Please see README.md

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-Parse.html b/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-Parse.html new file mode 100644 index 0000000..720a2bf --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-Parse.html @@ -0,0 +1,4 @@ +TensorFlow.Examples.MNIST.Parse

    TensorFlow.Examples.MNIST.Parse

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-TrainedGraph.html b/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-TrainedGraph.html new file mode 100644 index 0000000..f4f119a --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/mini_TensorFlow-Examples-MNIST-TrainedGraph.html @@ -0,0 +1,4 @@ +TensorFlow.Examples.MNIST.TrainedGraph

    TensorFlow.Examples.MNIST.TrainedGraph

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/minus.gif b/docs/haddock/tensorflow-mnist-0.1.0.0/minus.gif new file mode 100644 index 0000000000000000000000000000000000000000..1deac2fe1a42e35b994f1b855488f392c50f6a89 GIT binary patch literal 56 zcmZ?wbhEHb * { + font-size: 93%; /* 12pt */ +} + +#mini #module-list .caption, +#mini #module-header .caption { + font-size: 125%; /* 15pt */ +} + +#mini #interface h1, +#mini #interface h2, +#mini #interface h3, +#mini #interface h4 { + font-size: 109%; /* 13pt */ + margin: 1em 0 0; +} + +#mini #interface .top, +#mini #interface .src { + margin: 0; +} + +#mini #module-list ul { + list-style: none; + margin: 0; +} + +#alphabet ul { + list-style: none; + padding: 0; + margin: 0.5em 0 0; + text-align: center; +} + +#alphabet li { + display: inline; + margin: 0 0.25em; +} + +#alphabet a { + font-weight: bold; +} + +#index .caption, +#module-list .caption { font-size: 131%; /* 17pt */ } + +#index table { + margin-left: 2em; +} + +#index .src { + font-weight: bold; +} +#index .alt { + font-size: 77%; /* 10pt */ + font-style: italic; + padding-left: 2em; +} + +#index td + td { + padding-left: 1em; +} + +#module-list ul { + list-style: none; + margin: 0 0 0 2em; +} + +#module-list li { + clear: right; +} + +#module-list span.collapser, +#module-list span.expander { + background-position: 0 0.3em; +} + +#module-list .package { + float: right; +} + +/* @end */ diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/plus.gif b/docs/haddock/tensorflow-mnist-0.1.0.0/plus.gif new file mode 100644 index 0000000000000000000000000000000000000000..2d15c14173d23f664b955cd24f51c82f5f09d91d GIT binary patch literal 59 zcmZ?wbhEHbgbBX M^XE!9f*2UA0nx1yDgXcg literal 0 HcmV?d00001 diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html b/docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html new file mode 100644 index 0000000..96d3087 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html @@ -0,0 +1,46 @@ + + + + + +.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Paths_tensorflow_mnist.hs + + + +
    module Paths_tensorflow_mnist (
    +    version,
    +    getBinDir, getLibDir, getDataDir, getLibexecDir,
    +    getDataFileName, getSysconfDir
    +  ) where
    +
    +import qualified Control.Exception as Exception
    +import Data.Version (Version(..))
    +import System.Environment (getEnv)
    +import Prelude
    +
    +catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a
    +catchIO = Exception.catch
    +
    +version :: Version
    +version = Version [0,1,0,0] []
    +bindir, libdir, datadir, libexecdir, sysconfdir :: FilePath
    +
    +bindir     = "/home/gnezdo/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-6.2/7.10.3/bin"
    +libdir     = "/home/gnezdo/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-6.2/7.10.3/lib/x86_64-linux-ghc-7.10.3/tensorflow-mnist-0.1.0.0-2agxd0imrn964MW1mWb4VF"
    +datadir    = "/home/gnezdo/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-6.2/7.10.3/share/x86_64-linux-ghc-7.10.3/tensorflow-mnist-0.1.0.0"
    +libexecdir = "/home/gnezdo/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-6.2/7.10.3/libexec"
    +sysconfdir = "/home/gnezdo/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-6.2/7.10.3/etc"
    +
    +getBinDir, getLibDir, getDataDir, getLibexecDir, getSysconfDir :: IO FilePath
    +getBinDir = catchIO (getEnv "tensorflow_mnist_bindir") (\_ -> return bindir)
    +getLibDir = catchIO (getEnv "tensorflow_mnist_libdir") (\_ -> return libdir)
    +getDataDir = catchIO (getEnv "tensorflow_mnist_datadir") (\_ -> return datadir)
    +getLibexecDir = catchIO (getEnv "tensorflow_mnist_libexecdir") (\_ -> return libexecdir)
    +getSysconfDir = catchIO (getEnv "tensorflow_mnist_sysconfdir") (\_ -> return sysconfdir)
    +
    +getDataFileName :: FilePath -> IO FilePath
    +getDataFileName name = do
    +  dir <- getDataDir
    +  return (dir ++ "/" ++ name)
    +
    + diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-Parse.html b/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-Parse.html new file mode 100644 index 0000000..27b8468 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-Parse.html @@ -0,0 +1,107 @@ + + + + + +src/TensorFlow/Examples/MNIST/Parse.hs + + + +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE OverloadedLists #-}
    +{-# LANGUAGE TypeSynonymInstances #-}
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE ViewPatterns #-}
    +
    +module TensorFlow.Examples.MNIST.Parse where
    +
    +import Control.Monad (when, liftM)
    +import Data.Binary.Get (Get, runGet, getWord32be, getLazyByteString)
    +import Data.ByteString.Lazy (toStrict, readFile)
    +import Data.List.Split (chunksOf)
    +import Data.Monoid ((<>))
    +import Data.ProtoLens (Message, decodeMessageOrDie)
    +import Data.Text (Text)
    +import Data.Word (Word8, Word32)
    +import Prelude hiding (readFile)
    +import qualified Codec.Compression.GZip as GZip
    +import qualified Data.ByteString.Lazy as L
    +import qualified Data.Text as Text
    +import qualified Data.Vector as V
    +
    +-- | Utilities specific to MNIST.
    +type MNIST = V.Vector Word8
    +
    +-- | Produces a unicode rendering of the MNIST digit sample.
    +drawMNIST :: MNIST -> Text
    +drawMNIST = chunk . block
    +  where
    +    block :: V.Vector Word8 -> Text
    +    block (V.splitAt 1 -> ([0], xs)) = " " <> block xs
    +    block (V.splitAt 1 -> ([n], xs)) = c `Text.cons` block xs
    +      where c = "\9617\9618\9619\9608" !! fromIntegral (n `div` 64)
    +    block (V.splitAt 1 -> _)   = ""
    +    chunk :: Text -> Text
    +    chunk "" = "\n"
    +    chunk xs = Text.take 28 xs <> "\n" <> chunk (Text.drop 28 xs)
    +
+-- | Checks the file's endianness, throwing an error if it's not as expected.
    +checkEndian :: Get ()
    +checkEndian = do
    +    magic <- getWord32be
    +    when (magic `notElem` ([2049, 2051] :: [Word32])) $
    +        fail "Expected big endian, but image file is little endian."
    +
    +-- | Reads an MNIST file and returns a list of samples.
    +readMNISTSamples :: FilePath -> IO [MNIST]
    +readMNISTSamples path = do
    +    raw <- GZip.decompress <$> readFile path
    +    return $ runGet getMNIST raw
    +  where
    +    getMNIST :: Get [MNIST]
    +    getMNIST = do
    +        checkEndian
    +        -- Parse header data.
    +        cnt  <- liftM fromIntegral getWord32be
    +        rows <- liftM fromIntegral getWord32be
    +        cols <- liftM fromIntegral getWord32be
    +        -- Read all of the data, then split into samples.
    +        pixels <- getLazyByteString $ fromIntegral $ cnt * rows * cols
    +        return $ V.fromList <$> chunksOf (rows * cols) (L.unpack pixels)
    +
    +-- | Reads a list of MNIST labels from a file and returns them.
    +readMNISTLabels :: FilePath -> IO [Word8]
    +readMNISTLabels path = do
    +    raw <- GZip.decompress <$> readFile path
    +    return $ runGet getLabels raw
    +  where getLabels :: Get [Word8]
    +        getLabels = do
    +            checkEndian
    +            -- Parse header data.
    +            cnt <- liftM fromIntegral getWord32be
    +            -- Read all of the labels.
    +            L.unpack <$> getLazyByteString cnt
    +
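+-- | Reads a serialized proto message from a file, calling 'error' if
+-- decoding fails.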
    +readMessageFromFileOrDie :: Message m => FilePath -> IO m
    +readMessageFromFileOrDie path = do
    +    pb <- readFile path
    +    return $ decodeMessageOrDie $ toStrict pb
    +
+-- TODO: Write a writeMessageToFileOrDie and non-lethal read/write
+-- versions.
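+--
+-- A sketch of the non-lethal reader (an assumption, not this package's
+-- API): proto-lens also exposes 'decodeMessage', which returns the parse
+-- error instead of calling 'error'.
+--
+-- > -- assumes: import Data.ProtoLens (decodeMessage)
+-- > readMessageFromFile :: Message m => FilePath -> IO (Either String m)
+-- > readMessageFromFile path = decodeMessage . toStrict <$> readFile path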
    +
    + diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-TrainedGraph.html b/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-TrainedGraph.html new file mode 100644 index 0000000..881e4d8 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-TrainedGraph.html @@ -0,0 +1,41 @@ + + + + + +src-data/TensorFlow/Examples/MNIST/TrainedGraph.hs + + + +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE OverloadedStrings #-}
    +-- | Paths to test helper files.
    +module TensorFlow.Examples.MNIST.TrainedGraph where
    +
    +import Paths_tensorflow_mnist (getDataFileName)
    +import Data.ByteString (ByteString)
    +import Data.ByteString.Char8 (pack)
    +
+-- | File containing a TensorFlow serialized proto of the MNIST model.
    +mnistPb :: IO FilePath
    +mnistPb = getDataFileName "data/MNIST.pb"
    +
    +-- | Files containing pre-trained weights for MNIST.
    +wtsCkpt, biasCkpt :: IO ByteString
    +wtsCkpt = pack <$> getDataFileName "data/MNISTWts.ckpt"
    +biasCkpt = pack <$> getDataFileName "data/MNISTBias.ckpt"
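+
+-- A hedged usage sketch (not part of this module): loading the graph that
+-- 'mnistPb' points at with the sibling Parse module.
+--
+-- > -- assumes: import TensorFlow.Examples.MNIST.Parse (readMessageFromFileOrDie)
+-- > -- assumes: import Proto.Tensorflow.Core.Framework.Graph (GraphDef)
+-- > loadMNISTGraph :: IO GraphDef
+-- > loadMNISTGraph = readMessageFromFileOrDie =<< mnistPb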
    +
+ diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-mnist-0.1.0.0/src/hscolour.css new file mode 100644 index 0000000..c15919e --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/src/hscolour.css @@ -0,0 +1,5 @@ +.hs-keyglyph, .hs-layout {color: red;} +.hs-keyword {color: blue;} +.hs-comment, .hs-comment a {color: green;} +.hs-str, .hs-chr {color: teal;} +.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {} diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-mnist-0.1.0.0/synopsis.png new file mode 100644 index 0000000000000000000000000000000000000000..85fb86ec84907bcc86531dc82871948ff4d471fa
Binary files /dev/null and b/docs/haddock/tensorflow-mnist-0.1.0.0/synopsis.png differ
 diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/tensorflow-mnist.txt b/docs/haddock/tensorflow-mnist-0.1.0.0/tensorflow-mnist.txt new file mode 100644 index 0000000..5248c19 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/tensorflow-mnist.txt @@ -0,0 +1,41 @@
+-- Hoogle documentation, generated by Haddock
+-- See Hoogle, http://www.haskell.org/hoogle/
+
+
+-- | TensorFlow demo application for learning the MNIST model.
+--
+-- Please see README.md
+@package tensorflow-mnist
+@version 0.1.0.0
+
+
+-- | Paths to test helper files.
+module TensorFlow.Examples.MNIST.TrainedGraph
+
+-- | File containing a TensorFlow serialized proto of the MNIST model.
+mnistPb :: IO FilePath
+
+-- | Files containing pre-trained weights for MNIST.
+wtsCkpt :: IO ByteString
+
+-- | Files containing pre-trained weights for MNIST.
+biasCkpt :: IO ByteString
+
+module TensorFlow.Examples.MNIST.Parse
+
+-- | Utilities specific to MNIST.
+type MNIST = Vector Word8
+
+-- | Produces a Unicode rendering of the MNIST digit sample.
+drawMNIST :: MNIST -> Text
+
+-- | Checks the file's endianness, throwing an error if it's not as
+-- expected.
+checkEndian :: Get ()
+
+-- | Reads an MNIST file and returns a list of samples.
+readMNISTSamples :: FilePath -> IO [MNIST]
+
+-- | Reads a list of MNIST labels from a file and returns them.
+readMNISTLabels :: FilePath -> IO [Word8] +readMessageFromFileOrDie :: Message m => FilePath -> IO m diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html new file mode 100644 index 0000000..22288df --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html @@ -0,0 +1,4 @@ +TensorFlow.Examples.MNIST.InputData

    tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

Safe Haskell: Safe
Language: Haskell2010

    TensorFlow.Examples.MNIST.InputData

    Documentation

    trainingImageData :: IO FilePath Source

    Download the files containing the canonical MNIST samples and labels.

    trainingLabelData :: IO FilePath Source

    Download the files containing the canonical MNIST samples and labels.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/doc-index.html new file mode 100644 index 0000000..0fe16fa --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/doc-index.html @@ -0,0 +1,4 @@ +tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST. (Index)

    tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/frames.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/frames.html new file mode 100644 index 0000000..1b4e38d --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/frames.html @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/haddock-util.js new file mode 100644 index 0000000..9a6fccf --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/haddock-util.js @@ -0,0 +1,344 @@ +// Haddock JavaScript utilities + +var rspace = /\s\s+/g, + rtrim = /^\s+|\s+$/g; + +function spaced(s) { return (" " + s + " ").replace(rspace, " "); } +function trim(s) { return s.replace(rtrim, ""); } + +function hasClass(elem, value) { + var className = spaced(elem.className || ""); + return className.indexOf( " " + value + " " ) >= 0; +} + +function addClass(elem, value) { + var className = spaced(elem.className || ""); + if ( className.indexOf( " " + value + " " ) < 0 ) { + elem.className = trim(className + " " + value); + } +} + +function removeClass(elem, value) { + var className = spaced(elem.className || ""); + className = className.replace(" " + value + " ", " "); + elem.className = trim(className); +} + +function toggleClass(elem, valueOn, valueOff, bool) { + if (bool == null) { bool = ! hasClass(elem, valueOn); } + if (bool) { + removeClass(elem, valueOff); + addClass(elem, valueOn); + } + else { + removeClass(elem, valueOn); + addClass(elem, valueOff); + } + return bool; +} + + +function makeClassToggle(valueOn, valueOff) +{ + return function(elem, bool) { + return toggleClass(elem, valueOn, valueOff, bool); + } +} + +toggleShow = makeClassToggle("show", "hide"); +toggleCollapser = makeClassToggle("collapser", "expander"); + +function toggleSection(id) +{ + var b = toggleShow(document.getElementById("section." + id)); + toggleCollapser(document.getElementById("control." + id), b); + rememberCollapsed(id, b); + return b; +} + +var collapsed = {}; +function rememberCollapsed(id, b) +{ + if(b) + delete collapsed[id] + else + collapsed[id] = null; + + var sections = []; + for(var i in collapsed) + { + if(collapsed.hasOwnProperty(i)) + sections.push(i); + } + // cookie specific to this page; don't use setCookie which sets path=/ + document.cookie = "collapsed=" + escape(sections.join('+')); +} + +function restoreCollapsed() +{ + var cookie = getCookie("collapsed"); + if(!cookie) + return; + + var ids = cookie.split('+'); + for(var i in ids) + { + if(document.getElementById("section." 
+ ids[i])) + toggleSection(ids[i]); + } +} + +function setCookie(name, value) { + document.cookie = name + "=" + escape(value) + ";path=/;"; +} + +function clearCookie(name) { + document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; +} + +function getCookie(name) { + var nameEQ = name + "="; + var ca = document.cookie.split(';'); + for(var i=0;i < ca.length;i++) { + var c = ca[i]; + while (c.charAt(0)==' ') c = c.substring(1,c.length); + if (c.indexOf(nameEQ) == 0) { + return unescape(c.substring(nameEQ.length,c.length)); + } + } + return null; +} + + + +var max_results = 75; // 50 is not enough to search for map in the base libraries +var shown_range = null; +var last_search = null; + +function quick_search() +{ + perform_search(false); +} + +function full_search() +{ + perform_search(true); +} + + +function perform_search(full) +{ + var text = document.getElementById("searchbox").value.toLowerCase(); + if (text == last_search && !full) return; + last_search = text; + + var table = document.getElementById("indexlist"); + var status = document.getElementById("searchmsg"); + var children = table.firstChild.childNodes; + + // first figure out the first node with the prefix + var first = bisect(-1); + var last = (first == -1 ? -1 : bisect(1)); + + if (first == -1) + { + table.className = ""; + status.innerHTML = "No results found, displaying all"; + } + else if (first == 0 && last == children.length - 1) + { + table.className = ""; + status.innerHTML = ""; + } + else if (last - first >= max_results && !full) + { + table.className = ""; + status.innerHTML = "More than " + max_results + ", press Search to display"; + } + else + { + // decide what you need to clear/show + if (shown_range) + setclass(shown_range[0], shown_range[1], "indexrow"); + setclass(first, last, "indexshow"); + shown_range = [first, last]; + table.className = "indexsearch"; + status.innerHTML = ""; + } + + + function setclass(first, last, status) + { + for (var i = first; i <= last; i++) + { + children[i].className = status; + } + } + + + // do a binary search, treating 0 as ... + // return either -1 (no 0's found) or location of most far match + function bisect(dir) + { + var first = 0, finish = children.length - 1; + var mid, success = false; + + while (finish - first > 3) + { + mid = Math.floor((finish + first) / 2); + + var i = checkitem(mid); + if (i == 0) i = dir; + if (i == -1) + finish = mid; + else + first = mid; + } + var a = (dir == 1 ? first : finish); + var b = (dir == 1 ? finish : first); + for (var i = b; i != a - dir; i -= dir) + { + if (checkitem(i) == 0) return i; + } + return -1; + } + + + // from an index, decide what the result is + // 0 = match, -1 is lower, 1 is higher + function checkitem(i) + { + var s = getitem(i).toLowerCase().substr(0, text.length); + if (s == text) return 0; + else return (s > text ? -1 : 1); + } + + + // from an index, get its string + // this abstracts over alternates + function getitem(i) + { + for ( ; i >= 0; i--) + { + var s = children[i].firstChild.firstChild.data; + if (s.indexOf(' ') == -1) + return s; + } + return ""; // should never be reached + } +} + +function setSynopsis(filename) { + if (parent.window.synopsis) { + if (parent.window.synopsis.location.replace) { + // In Firefox this avoids adding the change to the history. 
+ parent.window.synopsis.location.replace(filename); + } else { + parent.window.synopsis.location = filename; + } + } +} + +function addMenuItem(html) { + var menu = document.getElementById("page-menu"); + if (menu) { + var btn = menu.firstChild.cloneNode(false); + btn.innerHTML = html; + menu.appendChild(btn); + } +} + +function adjustForFrames() { + var bodyCls; + + if (parent.location.href == window.location.href) { + // not in frames, so add Frames button + addMenuItem("Frames"); + bodyCls = "no-frame"; + } + else { + bodyCls = "in-frame"; + } + addClass(document.body, bodyCls); +} + +function reframe() { + setCookie("haddock-reframe", document.URL); + window.location = "frames.html"; +} + +function postReframe() { + var s = getCookie("haddock-reframe"); + if (s) { + parent.window.main.location = s; + clearCookie("haddock-reframe"); + } +} + +function styles() { + var i, a, es = document.getElementsByTagName("link"), rs = []; + for (i = 0; a = es[i]; i++) { + if(a.rel.indexOf("style") != -1 && a.title) { + rs.push(a); + } + } + return rs; +} + +function addStyleMenu() { + var as = styles(); + var i, a, btns = ""; + for(i=0; a = as[i]; i++) { + btns += "
  • " + + a.title + "
  • " + } + if (as.length > 1) { + var h = "
    " + + "Style ▾" + + "
      " + btns + "
    " + + "
    "; + addMenuItem(h); + } +} + +function setActiveStyleSheet(title) { + var as = styles(); + var i, a, found; + for(i=0; a = as[i]; i++) { + a.disabled = true; + // need to do this always, some browsers are edge triggered + if(a.title == title) { + found = a; + } + } + if (found) { + found.disabled = false; + setCookie("haddock-style", title); + } + else { + as[0].disabled = false; + clearCookie("haddock-style"); + } + styleMenu(false); +} + +function resetStyle() { + var s = getCookie("haddock-style"); + if (s) setActiveStyleSheet(s); +} + + +function styleMenu(show) { + var m = document.getElementById('style-menu'); + if (m) toggleShow(m, show); +} + + +function pageLoad() { + addStyleMenu(); + adjustForFrames(); + resetStyle(); + restoreCollapsed(); +} + diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/hslogo-16.png new file mode 100644 index 0000000000000000000000000000000000000000..0ff8579fbd897417b0d6dad6e920f8882138a7c0 GIT binary patch literal 1684 zcmV;F25b3=P)4Tx0C)j~RL^S@K@|QrZmG~B2wH0nvUrdpNm;9CMbtL^5n^i$+aIn^?(HA4aZWV5ov6ELTdbo0FI&wK{O>*+w4vx20?>!`FrQsdJlnHR>OPy zcd~b_n$otK2Za4V;76L-DzNVtaSB-y0*E}{p()372;bw_^6ZZ}PI-92wGS&j#91PI zKs7DSe@(bk%_Y-7gGe}(^>I=@oY#w#*Bu9GZf3^F5WP>3rn}7Ut74&?PWBFvy`A)a zPP5)V!Xd&78LdA?xQ(9mjMYElVd13a#D+Z_7&Y|xU=_C-srWU*6kiZcC!$nw*)9$7 zn6CX+@=AhmkT}X@VSsa5NKe;HZuq)~1$`#h6R+ZTR#D-3j}vF!)ZOnz+5)dI4jl{{ z44Mr{P!L4~VVJN`K!!XTF*LGrKO?IK8z<8w`3e3jI8lUGNUta*C8 zn(P`s>{pjD=7Kek#B;Fw@hxAK%$F&Q6vg9J^Xf~4by_hu-=A!MJ3Znq&n~srbFGPs zH&&aMXZ>nO`|hf|ljc?VPhR!${AbO?W8x_>CU%PFA&Hm8F7cAsOREdwU~R_;ot1_u z(ruCYB-LPGn!NQdT|ZlRy+(fw^-+`=%+gee_kY4FWHg<*4sZI8+sFJD270UUORdLHO0nA4V) z%{fwsET5CQ>B?eK%uw4yQc~9?*JVo2}ze(;aRcp*ceL#HUJSllrgm5wQKR zQu+C;QrUh^8rFfA`ftFz{YAidi-`aL010qNS#tmY4c7nw4c7reD4Tcy00T@(L_t(I z5sj2vNEA^R$7gqDc6T=2^@fUA2(c`MltuL5<|KW>RWz$&YbU@|M|{$E*8Tu-Ux!w z1Y*Dr&Ubfr&v-nZaaB{3ilRumrjPmk{sZvQEWlW+{o~IH|8)=s6c#X9S5s5d%J z4@)&QH5|xQY-)^L1n0pTRu0Lx9`08YTjTwn^6 z0;b1+aQ@)n;Em$q;=7BBi)v0zj&o^g>0Whp^_^5IbxIUP8C@y9;R?*Ouu}rmfxbU= zwtWVNke-m!=`7bYEhWpcI5#)9qp`8E0lr6IQ)ARL3Ui}Af@grj8aN1=r>Cb+prlzO zNfJs*N_tUm2ZL%5* zPmL2??da$TR904gL(VDAQ-Fv_Dk}Pdw*4T(%*f4MKLRg=4ekMjhe2mW zMFsBwg%ftWT}0kxRaIk1k7qJ8*#cKB;Ft{i`zVIs-Nqge;!!Ld7#O&Qqu7e0sJmP) z$MW*>L$vSB&dxp@iA3U9fo)-7!Czlr{|o7Hv{1oyg3xsu%gn@(b1>$;SM-ZaQ`HV=V0s;lr%d8bd;xY zGwNvm3=Iu=tyXIgtJnf@A(2S@M140N ew{UA~tMxaJq;$xaSSi*30000tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST. \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index.html new file mode 100644 index 0000000..c041e18 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/index.html @@ -0,0 +1,4 @@ +tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

    tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

    tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

    Please see README.md

    Modules

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/mini_TensorFlow-Examples-MNIST-InputData.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/mini_TensorFlow-Examples-MNIST-InputData.html new file mode 100644 index 0000000..d719ceb --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/mini_TensorFlow-Examples-MNIST-InputData.html @@ -0,0 +1,4 @@ +TensorFlow.Examples.MNIST.InputData

    TensorFlow.Examples.MNIST.InputData

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/minus.gif b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/minus.gif new file mode 100644 index 0000000000000000000000000000000000000000..1deac2fe1a42e35b994f1b855488f392c50f6a89 GIT binary patch literal 56 zcmZ?wbhEHb * { + font-size: 93%; /* 12pt */ +} + +#mini #module-list .caption, +#mini #module-header .caption { + font-size: 125%; /* 15pt */ +} + +#mini #interface h1, +#mini #interface h2, +#mini #interface h3, +#mini #interface h4 { + font-size: 109%; /* 13pt */ + margin: 1em 0 0; +} + +#mini #interface .top, +#mini #interface .src { + margin: 0; +} + +#mini #module-list ul { + list-style: none; + margin: 0; +} + +#alphabet ul { + list-style: none; + padding: 0; + margin: 0.5em 0 0; + text-align: center; +} + +#alphabet li { + display: inline; + margin: 0 0.25em; +} + +#alphabet a { + font-weight: bold; +} + +#index .caption, +#module-list .caption { font-size: 131%; /* 17pt */ } + +#index table { + margin-left: 2em; +} + +#index .src { + font-weight: bold; +} +#index .alt { + font-size: 77%; /* 10pt */ + font-style: italic; + padding-left: 2em; +} + +#index td + td { + padding-left: 1em; +} + +#module-list ul { + list-style: none; + margin: 0 0 0 2em; +} + +#module-list li { + clear: right; +} + +#module-list span.collapser, +#module-list span.expander { + background-position: 0 0.3em; +} + +#module-list .package { + float: right; +} + +/* @end */ diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/plus.gif b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/plus.gif new file mode 100644 index 0000000000000000000000000000000000000000..2d15c14173d23f664b955cd24f51c82f5f09d91d GIT binary patch literal 59 zcmZ?wbhEHbgbBX M^XE!9f*2UA0nx1yDgXcg literal 0 HcmV?d00001 diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html new file mode 100644 index 0000000..9fead8c --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html @@ -0,0 +1,46 @@ + + + + + +.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Paths_tensorflow_mnist_input_data.hs + + + +
    module Paths_tensorflow_mnist_input_data (
    +    version,
    +    getBinDir, getLibDir, getDataDir, getLibexecDir,
    +    getDataFileName, getSysconfDir
    +  ) where
    +
    +import qualified Control.Exception as Exception
    +import Data.Version (Version(..))
    +import System.Environment (getEnv)
    +import Prelude
    +
    +catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a
    +catchIO = Exception.catch
    +
    +version :: Version
    +version = Version [0,1,0,0] []
    +bindir, libdir, datadir, libexecdir, sysconfdir :: FilePath
    +
    +bindir     = "/home/gnezdo/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-6.2/7.10.3/bin"
    +libdir     = "/home/gnezdo/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-6.2/7.10.3/lib/x86_64-linux-ghc-7.10.3/tensorflow-mnist-input-data-0.1.0.0-JIQTTyi85Nv6pdBnglu33Q"
    +datadir    = "/home/gnezdo/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-6.2/7.10.3/share/x86_64-linux-ghc-7.10.3/tensorflow-mnist-input-data-0.1.0.0"
    +libexecdir = "/home/gnezdo/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-6.2/7.10.3/libexec"
    +sysconfdir = "/home/gnezdo/tensorflow-haskell/.stack-work/install/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/lts-6.2/7.10.3/etc"
    +
    +getBinDir, getLibDir, getDataDir, getLibexecDir, getSysconfDir :: IO FilePath
    +getBinDir = catchIO (getEnv "tensorflow_mnist_input_data_bindir") (\_ -> return bindir)
    +getLibDir = catchIO (getEnv "tensorflow_mnist_input_data_libdir") (\_ -> return libdir)
    +getDataDir = catchIO (getEnv "tensorflow_mnist_input_data_datadir") (\_ -> return datadir)
    +getLibexecDir = catchIO (getEnv "tensorflow_mnist_input_data_libexecdir") (\_ -> return libexecdir)
    +getSysconfDir = catchIO (getEnv "tensorflow_mnist_input_data_sysconfdir") (\_ -> return sysconfdir)
    +
    +getDataFileName :: FilePath -> IO FilePath
    +getDataFileName name = do
    +  dir <- getDataDir
    +  return (dir ++ "/" ++ name)
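+
+-- A hedged usage sketch (not part of the generated module): each getter
+-- above consults an environment variable before falling back to the
+-- baked-in path, so installs can be relocated at runtime.
+--
+-- > -- assumes: import System.Environment (setEnv)  -- base >= 4.7
+-- > relocated :: IO FilePath
+-- > relocated = do
+-- >     setEnv "tensorflow_mnist_input_data_datadir" "/tmp/mnist-data"
+-- >     getDataFileName "train-images-idx3-ubyte.gz"
+-- >     -- returns "/tmp/mnist-data/train-images-idx3-ubyte.gz"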
    +
    + diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow-Examples-MNIST-InputData.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow-Examples-MNIST-InputData.html new file mode 100644 index 0000000..2dd0400 --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow-Examples-MNIST-InputData.html @@ -0,0 +1,42 @@ + + + + + +src/TensorFlow/Examples/MNIST/InputData.hs + + + +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +module TensorFlow.Examples.MNIST.InputData
    +  ( trainingImageData
    +  , trainingLabelData
    +  , testImageData
    +  , testLabelData
    +  ) where
    +
    +import Paths_tensorflow_mnist_input_data (getDataFileName)
    +
    +-- | Download the files containing the canonical MNIST samples and labels.
    +trainingImageData, trainingLabelData :: IO FilePath
    +trainingImageData = getDataFileName "train-images-idx3-ubyte.gz"
    +trainingLabelData = getDataFileName "train-labels-idx1-ubyte.gz"
    +
    +testImageData, testLabelData :: IO FilePath
    +testImageData = getDataFileName "t10k-images-idx3-ubyte.gz"
    +testLabelData = getDataFileName "t10k-labels-idx1-ubyte.gz"
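+
+-- A hedged usage sketch (not part of this module): these getters pair
+-- naturally with the Parse module from the tensorflow-mnist package.
+--
+-- > -- assumes: import TensorFlow.Examples.MNIST.Parse (MNIST, readMNISTSamples)
+-- > loadTrainingImages :: IO [MNIST]
+-- > loadTrainingImages = readMNISTSamples =<< trainingImageData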
    +
+ diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/hscolour.css new file mode 100644 index 0000000..c15919e --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/hscolour.css @@ -0,0 +1,5 @@ +.hs-keyglyph, .hs-layout {color: red;} +.hs-keyword {color: blue;} +.hs-comment, .hs-comment a {color: green;} +.hs-str, .hs-chr {color: teal;} +.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {} diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/synopsis.png new file mode 100644 index 0000000000000000000000000000000000000000..85fb86ec84907bcc86531dc82871948ff4d471fa
Binary files /dev/null and b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/synopsis.png differ
 diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/tensorflow-mnist-input-data.txt b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/tensorflow-mnist-input-data.txt new file mode 100644 index 0000000..afb87ad --- /dev/null +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/tensorflow-mnist-input-data.txt @@ -0,0 +1,19 @@
+-- Hoogle documentation, generated by Haddock
+-- See Hoogle, http://www.haskell.org/hoogle/
+
+
+-- | Downloader of input data for training MNIST.
+--
+-- Please see README.md
+@package tensorflow-mnist-input-data
+@version 0.1.0.0
+
+module TensorFlow.Examples.MNIST.InputData
+
+-- | Download the files containing the canonical MNIST samples and labels.
+trainingImageData :: IO FilePath
+
+-- | Download the files containing the canonical MNIST samples and labels.
+trainingLabelData :: IO FilePath
+testImageData :: IO FilePath
+testLabelData :: IO FilePath
 diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-AttrVal.html b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-AttrVal.html new file mode 100644 index 0000000..783aa85 --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-AttrVal.html @@ -0,0 +1,6 @@ +TensorFlow.OpGen.AttrVal

    tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.OpGen.AttrVal

    Description

    Wrapping of TensorFlow attributes into Haskell entities.

    Documentation

    data AttrTemplate Source

Type-reified representation of TensorFlow AttrDef. Initially limited to just the types in Op descriptors.

    Constructors

    AttrSingle (AttrCase Template) 
    AttrList (AttrCase []) 
    AttrTensor UnusedTensor 

    data Template a Source

Specifies the optional default value and a set of allowed values for the given type.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html new file mode 100644 index 0000000..2e411d7 --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html @@ -0,0 +1,4 @@ +TensorFlow.OpGen

    tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.OpGen

    Description

    Rendering of TensorFlow operations as Haskell functions.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html new file mode 100644 index 0000000..e02dd2e --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html @@ -0,0 +1,4 @@ +tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations. (Index)

    tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/frames.html b/docs/haddock/tensorflow-opgen-0.1.0.0/frames.html new file mode 100644 index 0000000..1b4e38d --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/frames.html @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-opgen-0.1.0.0/haddock-util.js new file mode 100644 index 0000000..9a6fccf --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/haddock-util.js @@ -0,0 +1,344 @@ +// Haddock JavaScript utilities + +var rspace = /\s\s+/g, + rtrim = /^\s+|\s+$/g; + +function spaced(s) { return (" " + s + " ").replace(rspace, " "); } +function trim(s) { return s.replace(rtrim, ""); } + +function hasClass(elem, value) { + var className = spaced(elem.className || ""); + return className.indexOf( " " + value + " " ) >= 0; +} + +function addClass(elem, value) { + var className = spaced(elem.className || ""); + if ( className.indexOf( " " + value + " " ) < 0 ) { + elem.className = trim(className + " " + value); + } +} + +function removeClass(elem, value) { + var className = spaced(elem.className || ""); + className = className.replace(" " + value + " ", " "); + elem.className = trim(className); +} + +function toggleClass(elem, valueOn, valueOff, bool) { + if (bool == null) { bool = ! hasClass(elem, valueOn); } + if (bool) { + removeClass(elem, valueOff); + addClass(elem, valueOn); + } + else { + removeClass(elem, valueOn); + addClass(elem, valueOff); + } + return bool; +} + + +function makeClassToggle(valueOn, valueOff) +{ + return function(elem, bool) { + return toggleClass(elem, valueOn, valueOff, bool); + } +} + +toggleShow = makeClassToggle("show", "hide"); +toggleCollapser = makeClassToggle("collapser", "expander"); + +function toggleSection(id) +{ + var b = toggleShow(document.getElementById("section." + id)); + toggleCollapser(document.getElementById("control." + id), b); + rememberCollapsed(id, b); + return b; +} + +var collapsed = {}; +function rememberCollapsed(id, b) +{ + if(b) + delete collapsed[id] + else + collapsed[id] = null; + + var sections = []; + for(var i in collapsed) + { + if(collapsed.hasOwnProperty(i)) + sections.push(i); + } + // cookie specific to this page; don't use setCookie which sets path=/ + document.cookie = "collapsed=" + escape(sections.join('+')); +} + +function restoreCollapsed() +{ + var cookie = getCookie("collapsed"); + if(!cookie) + return; + + var ids = cookie.split('+'); + for(var i in ids) + { + if(document.getElementById("section." 
+ ids[i])) + toggleSection(ids[i]); + } +} + +function setCookie(name, value) { + document.cookie = name + "=" + escape(value) + ";path=/;"; +} + +function clearCookie(name) { + document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; +} + +function getCookie(name) { + var nameEQ = name + "="; + var ca = document.cookie.split(';'); + for(var i=0;i < ca.length;i++) { + var c = ca[i]; + while (c.charAt(0)==' ') c = c.substring(1,c.length); + if (c.indexOf(nameEQ) == 0) { + return unescape(c.substring(nameEQ.length,c.length)); + } + } + return null; +} + + + +var max_results = 75; // 50 is not enough to search for map in the base libraries +var shown_range = null; +var last_search = null; + +function quick_search() +{ + perform_search(false); +} + +function full_search() +{ + perform_search(true); +} + + +function perform_search(full) +{ + var text = document.getElementById("searchbox").value.toLowerCase(); + if (text == last_search && !full) return; + last_search = text; + + var table = document.getElementById("indexlist"); + var status = document.getElementById("searchmsg"); + var children = table.firstChild.childNodes; + + // first figure out the first node with the prefix + var first = bisect(-1); + var last = (first == -1 ? -1 : bisect(1)); + + if (first == -1) + { + table.className = ""; + status.innerHTML = "No results found, displaying all"; + } + else if (first == 0 && last == children.length - 1) + { + table.className = ""; + status.innerHTML = ""; + } + else if (last - first >= max_results && !full) + { + table.className = ""; + status.innerHTML = "More than " + max_results + ", press Search to display"; + } + else + { + // decide what you need to clear/show + if (shown_range) + setclass(shown_range[0], shown_range[1], "indexrow"); + setclass(first, last, "indexshow"); + shown_range = [first, last]; + table.className = "indexsearch"; + status.innerHTML = ""; + } + + + function setclass(first, last, status) + { + for (var i = first; i <= last; i++) + { + children[i].className = status; + } + } + + + // do a binary search, treating 0 as ... + // return either -1 (no 0's found) or location of most far match + function bisect(dir) + { + var first = 0, finish = children.length - 1; + var mid, success = false; + + while (finish - first > 3) + { + mid = Math.floor((finish + first) / 2); + + var i = checkitem(mid); + if (i == 0) i = dir; + if (i == -1) + finish = mid; + else + first = mid; + } + var a = (dir == 1 ? first : finish); + var b = (dir == 1 ? finish : first); + for (var i = b; i != a - dir; i -= dir) + { + if (checkitem(i) == 0) return i; + } + return -1; + } + + + // from an index, decide what the result is + // 0 = match, -1 is lower, 1 is higher + function checkitem(i) + { + var s = getitem(i).toLowerCase().substr(0, text.length); + if (s == text) return 0; + else return (s > text ? -1 : 1); + } + + + // from an index, get its string + // this abstracts over alternates + function getitem(i) + { + for ( ; i >= 0; i--) + { + var s = children[i].firstChild.firstChild.data; + if (s.indexOf(' ') == -1) + return s; + } + return ""; // should never be reached + } +} + +function setSynopsis(filename) { + if (parent.window.synopsis) { + if (parent.window.synopsis.location.replace) { + // In Firefox this avoids adding the change to the history. 
+ parent.window.synopsis.location.replace(filename); + } else { + parent.window.synopsis.location = filename; + } + } +} + +function addMenuItem(html) { + var menu = document.getElementById("page-menu"); + if (menu) { + var btn = menu.firstChild.cloneNode(false); + btn.innerHTML = html; + menu.appendChild(btn); + } +} + +function adjustForFrames() { + var bodyCls; + + if (parent.location.href == window.location.href) { + // not in frames, so add Frames button + addMenuItem("Frames"); + bodyCls = "no-frame"; + } + else { + bodyCls = "in-frame"; + } + addClass(document.body, bodyCls); +} + +function reframe() { + setCookie("haddock-reframe", document.URL); + window.location = "frames.html"; +} + +function postReframe() { + var s = getCookie("haddock-reframe"); + if (s) { + parent.window.main.location = s; + clearCookie("haddock-reframe"); + } +} + +function styles() { + var i, a, es = document.getElementsByTagName("link"), rs = []; + for (i = 0; a = es[i]; i++) { + if(a.rel.indexOf("style") != -1 && a.title) { + rs.push(a); + } + } + return rs; +} + +function addStyleMenu() { + var as = styles(); + var i, a, btns = ""; + for(i=0; a = as[i]; i++) { + btns += "
  • " + + a.title + "
  • " + } + if (as.length > 1) { + var h = "
    " + + "Style ▾" + + "
      " + btns + "
    " + + "
    "; + addMenuItem(h); + } +} + +function setActiveStyleSheet(title) { + var as = styles(); + var i, a, found; + for(i=0; a = as[i]; i++) { + a.disabled = true; + // need to do this always, some browsers are edge triggered + if(a.title == title) { + found = a; + } + } + if (found) { + found.disabled = false; + setCookie("haddock-style", title); + } + else { + as[0].disabled = false; + clearCookie("haddock-style"); + } + styleMenu(false); +} + +function resetStyle() { + var s = getCookie("haddock-style"); + if (s) setActiveStyleSheet(s); +} + + +function styleMenu(show) { + var m = document.getElementById('style-menu'); + if (m) toggleShow(m, show); +} + + +function pageLoad() { + addStyleMenu(); + adjustForFrames(); + resetStyle(); + restoreCollapsed(); +} + diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-opgen-0.1.0.0/hslogo-16.png new file mode 100644 index 0000000000000000000000000000000000000000..0ff8579fbd897417b0d6dad6e920f8882138a7c0 GIT binary patch literal 1684 zcmV;F25b3=P)4Tx0C)j~RL^S@K@|QrZmG~B2wH0nvUrdpNm;9CMbtL^5n^i$+aIn^?(HA4aZWV5ov6ELTdbo0FI&wK{O>*+w4vx20?>!`FrQsdJlnHR>OPy zcd~b_n$otK2Za4V;76L-DzNVtaSB-y0*E}{p()372;bw_^6ZZ}PI-92wGS&j#91PI zKs7DSe@(bk%_Y-7gGe}(^>I=@oY#w#*Bu9GZf3^F5WP>3rn}7Ut74&?PWBFvy`A)a zPP5)V!Xd&78LdA?xQ(9mjMYElVd13a#D+Z_7&Y|xU=_C-srWU*6kiZcC!$nw*)9$7 zn6CX+@=AhmkT}X@VSsa5NKe;HZuq)~1$`#h6R+ZTR#D-3j}vF!)ZOnz+5)dI4jl{{ z44Mr{P!L4~VVJN`K!!XTF*LGrKO?IK8z<8w`3e3jI8lUGNUta*C8 zn(P`s>{pjD=7Kek#B;Fw@hxAK%$F&Q6vg9J^Xf~4by_hu-=A!MJ3Znq&n~srbFGPs zH&&aMXZ>nO`|hf|ljc?VPhR!${AbO?W8x_>CU%PFA&Hm8F7cAsOREdwU~R_;ot1_u z(ruCYB-LPGn!NQdT|ZlRy+(fw^-+`=%+gee_kY4FWHg<*4sZI8+sFJD270UUORdLHO0nA4V) z%{fwsET5CQ>B?eK%uw4yQc~9?*JVo2}ze(;aRcp*ceL#HUJSllrgm5wQKR zQu+C;QrUh^8rFfA`ftFz{YAidi-`aL010qNS#tmY4c7nw4c7reD4Tcy00T@(L_t(I z5sj2vNEA^R$7gqDc6T=2^@fUA2(c`MltuL5<|KW>RWz$&YbU@|M|{$E*8Tu-Ux!w z1Y*Dr&Ubfr&v-nZaaB{3ilRumrjPmk{sZvQEWlW+{o~IH|8)=s6c#X9S5s5d%J z4@)&QH5|xQY-)^L1n0pTRu0Lx9`08YTjTwn^6 z0;b1+aQ@)n;Em$q;=7BBi)v0zj&o^g>0Whp^_^5IbxIUP8C@y9;R?*Ouu}rmfxbU= zwtWVNke-m!=`7bYEhWpcI5#)9qp`8E0lr6IQ)ARL3Ui}Af@grj8aN1=r>Cb+prlzO zNfJs*N_tUm2ZL%5* zPmL2??da$TR904gL(VDAQ-Fv_Dk}Pdw*4T(%*f4MKLRg=4ekMjhe2mW zMFsBwg%ftWT}0kxRaIk1k7qJ8*#cKB;Ft{i`zVIs-Nqge;!!Ld7#O&Qqu7e0sJmP) z$MW*>L$vSB&dxp@iA3U9fo)-7!Czlr{|o7Hv{1oyg3xsu%gn@(b1>$;SM-ZaQ`HV=V0s;lr%d8bd;xY zGwNvm3=Iu=tyXIgtJnf@A(2S@M140N ew{UA~tMxaJq;$xaSSi*30000tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations. \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/index.html b/docs/haddock/tensorflow-opgen-0.1.0.0/index.html new file mode 100644 index 0000000..de25a07 --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/index.html @@ -0,0 +1,4 @@ +tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

    tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

    tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

    Please see README.md

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-AttrVal.html b/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-AttrVal.html new file mode 100644 index 0000000..c945e9a --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-AttrVal.html @@ -0,0 +1,4 @@ +TensorFlow.OpGen.AttrVal

    TensorFlow.OpGen.AttrVal

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen.html b/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen.html new file mode 100644 index 0000000..694d0c0 --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen.html @@ -0,0 +1,4 @@ +TensorFlow.OpGen

    TensorFlow.OpGen

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/minus.gif b/docs/haddock/tensorflow-opgen-0.1.0.0/minus.gif new file mode 100644 index 0000000000000000000000000000000000000000..1deac2fe1a42e35b994f1b855488f392c50f6a89 GIT binary patch literal 56 zcmZ?wbhEHb * { + font-size: 93%; /* 12pt */ +} + +#mini #module-list .caption, +#mini #module-header .caption { + font-size: 125%; /* 15pt */ +} + +#mini #interface h1, +#mini #interface h2, +#mini #interface h3, +#mini #interface h4 { + font-size: 109%; /* 13pt */ + margin: 1em 0 0; +} + +#mini #interface .top, +#mini #interface .src { + margin: 0; +} + +#mini #module-list ul { + list-style: none; + margin: 0; +} + +#alphabet ul { + list-style: none; + padding: 0; + margin: 0.5em 0 0; + text-align: center; +} + +#alphabet li { + display: inline; + margin: 0 0.25em; +} + +#alphabet a { + font-weight: bold; +} + +#index .caption, +#module-list .caption { font-size: 131%; /* 17pt */ } + +#index table { + margin-left: 2em; +} + +#index .src { + font-weight: bold; +} +#index .alt { + font-size: 77%; /* 10pt */ + font-style: italic; + padding-left: 2em; +} + +#index td + td { + padding-left: 1em; +} + +#module-list ul { + list-style: none; + margin: 0 0 0 2em; +} + +#module-list li { + clear: right; +} + +#module-list span.collapser, +#module-list span.expander { + background-position: 0 0.3em; +} + +#module-list .package { + float: right; +} + +/* @end */ diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/plus.gif b/docs/haddock/tensorflow-opgen-0.1.0.0/plus.gif new file mode 100644 index 0000000000000000000000000000000000000000..2d15c14173d23f664b955cd24f51c82f5f09d91d GIT binary patch literal 59 zcmZ?wbhEHbgbBX M^XE!9f*2UA0nx1yDgXcg literal 0 HcmV?d00001 diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen-AttrVal.html b/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen-AttrVal.html new file mode 100644 index 0000000..2cbe9fb --- /dev/null +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen-AttrVal.html @@ -0,0 +1,131 @@ + + + + + +src/TensorFlow/OpGen/AttrVal.hs + + + +
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE OverloadedStrings #-}
    +
    +-- | Wrapping of TensorFlow attributes into Haskell entities.
    +module TensorFlow.OpGen.AttrVal
    +       (AttrDef
    +       , AttrCase(..)
    +       , AttrTemplate(..)
    +       , Template
    +       , attrDef
    +       , attrOriginal
    +       , attrTemplate
    +       , templateDefault
    +       , templateRestrictions
    +       ) where
    +
    +import Data.Int (Int64)
    +import Data.Monoid ((<>))
    +import Lens.Family2 (Lens', (^.))
    +import Lens.Family2.Unchecked (lens)
    +import Proto.Tensorflow.Core.Framework.AttrValue as AttrValue
    +import Proto.Tensorflow.Core.Framework.OpDef as OpDef
    +import Proto.Tensorflow.Core.Framework.Types (DataType(..))
    +import Proto.Tensorflow.Core.Framework.TensorShape (TensorShapeProto)
    +import qualified Data.ByteString as B
    +import qualified Data.Text as Text
    +
    +-- | Specifies the optional default value and a set of allowed values
    +-- for the given type.
    +data Template a = Template {
    +    _templateDefault      :: Maybe a
+    -- ^ The default value (the attribute is mandatory if unspecified)
    +  , _templateRestrictions :: [a]
    +    -- ^ The allowed set of values, empty if no restrictions
    + }
    +
    +templateDefault :: Lens' (Template a) (Maybe a)
    +templateDefault = lens _templateDefault (\g x -> g { _templateDefault = x })
    +
    +templateRestrictions :: Lens' (Template a) [a]
    +templateRestrictions = lens _templateRestrictions
    +                            (\g x -> g { _templateRestrictions = x })
    +
    +data UnusedTensor
    +
    +data AttrCase f
    +  = AttrBytes (f B.ByteString)          -- bytes s = 2; // "string"
    +  | AttrInt64 (f Int64)                 -- int64 i = 3; // "int"
    +  | AttrFloat (f Float)                 -- float f = 4; // "float"
    +  | AttrBool  (f Bool)                  -- bool b = 5;  // "bool"
    +  | AttrType  (f DataType)              -- type = 6; // "type"
    +    -- To be translated into TensorFlow.Types.Shape before use.
    +    -- Leaving as a proto to reduce dependencies.
    +  | AttrShape (f TensorShapeProto)      -- shape = 7; // "shape"
    +
    +-- | Type-reified representation of TensorFlow AttrDef.
    +-- Initially limited to just the types in Op descriptors.
    +data AttrTemplate
    +  = AttrSingle (AttrCase Template)
    +  | AttrList (AttrCase [])
    +  | AttrTensor UnusedTensor         -- tensor = 8; // "tensor"
    +
    +data AttrDef = AttrDef {
    +    _attrOriginal :: OpDef'AttrDef -- ^ the proto this value was created from
    +  , _attrTemplate :: AttrTemplate  -- ^ the type of the attribute
    +  }
    +
    +attrTemplate :: Lens' AttrDef AttrTemplate
    +attrTemplate = lens _attrTemplate (\g x -> g { _attrTemplate = x })
    +
    +attrOriginal :: Lens' AttrDef OpDef'AttrDef
    +attrOriginal = lens _attrOriginal (\g x -> g { _attrOriginal = x })
    +
    +attrDef :: OpDef'AttrDef -> AttrDef
    +attrDef a = AttrDef a
    +                  $ translate (a^.OpDef.type')
    +                              (a^.OpDef.defaultValue)
    +                              (a^.allowedValues)
    +
+-- | Converts the given AttrValue with the type given by the string
+-- into an AttrTemplate if the type is known.
    +translate :: Text.Text  -- ^ one of the TensorFlow type strings
    +          -> AttrValue  -- ^ default value
    +          -> AttrValue  -- ^ allowed values
    +          -> AttrTemplate
    +translate t defaults allowed
    +  | t == "string" = makeVal AttrBytes maybe's s
    +  | t == "int" = makeVal AttrInt64 maybe'i i
    +  | t == "float" = makeVal AttrFloat maybe'f f
    +  | t == "bool" = makeVal AttrBool maybe'b b
    +  | t == "type" = makeVal AttrType AttrValue.maybe'type' AttrValue.type'
    +  | t == "shape" = makeVal AttrShape maybe'shape shape
    +  | t == "tensor" = AttrTensor $ error "tensor is unimplemented"
    +  | t == "list(string)" = makeList AttrBytes $ list.s
    +  | t == "list(int)" = makeList AttrInt64 $ list.i
    +  | t == "list(float)" = makeList AttrFloat $ list.f
    +  | t == "list(bool)" = makeList AttrBool $ list.b
    +  | t == "list(type)" = makeList AttrType $ list.AttrValue.type'
    +  | t == "list(shape)" = makeList AttrShape $ list.shape
    +  | t == "list(tensor)" = AttrTensor $ error "list(tensor) is unimplemented"
    +  | t == "func" = AttrTensor $ error "func is unimplemented"
    +  | otherwise = error $ show ("Unknown attribute type " <> t) ++
    +                        "," ++ show defaults ++
    +                        "," ++ show allowed
    +  where makeVal c x y = AttrSingle $ c $
    +                        Template (defaults^.x) (allowed^.list.y)
    +        makeList c y  = AttrList $ c $ defaults^.y
    +
+
diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen.html b/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen.html
new file mode 100644
index 0000000..7ae17e2
--- /dev/null
+++ b/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen.html
@@ -0,0 +1,468 @@
+src/TensorFlow/OpGen.hs
    -- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE FlexibleContexts #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE TypeFamilies #-}
    +-- | Rendering of TensorFlow operations as Haskell functions.
    +
    +module TensorFlow.OpGen
    +  ( OpGenFlags(..)
    +  , docOpList
    +  , flagParser)
    +  where
    +
    +import Prelude hiding (head, tail)
    +
    +import Control.Monad (guard)
    +import Data.Char (toLower, toUpper)
    +import Data.Foldable (toList)
    +import Data.Maybe (fromMaybe, maybeToList)
    +import Data.ProtoLens (def, showMessage)
    +import Data.List.NonEmpty (NonEmpty((:|)), head)
    +import qualified Data.List.NonEmpty as NE
    +import Lens.Family2 ((^.), (.~), (&), view)
    +import Options.Applicative (Parser, help, long, strOption, value)
    +import Proto.Tensorflow.Core.Framework.OpDef
    +  ( OpList
    +  , OpDef
    +  , OpDef'ArgDef
    +  , attr
    +  , description
    +  , inputArg
    +  , name
    +  , numberAttr
    +  , op
    +  , outputArg
    +  , summary
    +  , type'
    +  , typeAttr
    +  )
    +import Proto.Tensorflow.Core.Framework.Types (DataType(..))
    +import System.FilePath (takeBaseName)
    +import TensorFlow.OpGen.AttrVal
    +  (AttrDef
    +  , AttrCase(..)
    +  , AttrTemplate(..)
    +  , Template
    +  , attrDef
    +  , attrOriginal
    +  , attrTemplate
    +  , templateDefault
    +  , templateRestrictions
    +  )
    +import Text.PrettyPrint.Mainland
    +  ( Doc
    +  , (<>)
    +  , (<+>)
    +  , (</>)
    +  , (<+/>)
    +  , brackets
    +  , comma
    +  , commasep
    +  , dquotes
    +  , empty
    +  , enclose
    +  , flatten
    +  , folddoc
    +  , hang
    +  , indent
    +  , int
    +  , parens
    +  , sep
    +  , stack
    +  , strictText
    +  , tuple
    +  )
    +import qualified Data.Map.Strict as Map
    +import qualified Data.Set as Set
    +import qualified Data.Text as Text
    +import qualified Data.Semigroup as Semigroup
    +import Data.Text (Text)
    +
    +data OpGenFlags = OpGenFlags
    +     { outputFile :: String
    +     , prefix :: String
    +     , excludeList :: String
    +     }
    +
    +flagParser :: Parser OpGenFlags
    +flagParser = OpGenFlags
    +     <$> strOption (mconcat [ long "output"
    +                            , help "File to write."
    +                            ])
    +     <*> strOption (mconcat [ long "prefix"
    +                            , help "Haskell package prefix to use"
    +                            ])
    +     <*> strOption (mconcat [ long "exclude_list"
    +                            , value ""
    +                            , help "Comma separated Ops names to ignore"
    +                            ])
    +
    +
    +docOpList :: OpGenFlags -> OpList -> Doc
    +docOpList flags opList =
    +  stack [ "{-# LANGUAGE ConstraintKinds #-}"
    +        , "{-# LANGUAGE DataKinds #-}"
    +        , "{-# LANGUAGE FlexibleInstances #-}"
    +        , "{-# LANGUAGE OverloadedStrings #-}"
    +        , "{-# LANGUAGE RankNTypes #-}"
    +        , "{-# LANGUAGE ScopedTypeVariables #-}"
    +        , "module" <+> strictText moduleName <+> "where"
    +        , empty
    +        , imports
    +        , empty
    +        , folddoc (\x y -> x </> empty </> y)
    +                  (map renderDef $
    +                   filter (not . flip elem exclusions . view name) $
    +                   toList $ opList ^. op)
    +        ]
    +  where moduleName =
    +            Text.pack (prefix flags) <> "." <> camelCase
    +             -- Discards the optional trailing _op_lib
    +            (fromMaybe shortName (Text.stripSuffix "_op_lib" shortName))
    +        shortName = Text.pack (takeBaseName $ outputFile flags)
    +        exclusions = Text.splitOn "," $ Text.pack $ excludeList flags
    +
+-- | Camel-cases a snake_case name, dropping any "ops" component;
+-- e.g. camelCase "array_ops" == "Array".
+camelCase :: Text -> Text
+camelCase s = Text.concat $ map upCase
    +                          $ filter (/= "ops")
    +                          $ Text.splitOn "_" s
    +
    +-- | Upper-case the given text.
    +upCase :: Text -> Text
    +upCase = forceCase toUpper
    +
    +-- | Lower-case the given name, and prevent it from overlapping with a reserved
    +-- Haskell name.
    +lowCase :: Text -> Text
    +lowCase = replaceReservedName . forceCase toLower
    +
    +forceCase :: (Char -> Char) -> Text -> Text
    +forceCase convert s = maybe "" (\(c, cs) -> Text.cons (convert c) cs)
    +                      (Text.uncons s)
    +
+imports :: Doc
+imports = stack [
    +      "import Data.ByteString (ByteString)"
    +    , "import Data.Complex (Complex)"
    +    , "import Data.Int (Int8, Int16, Int32, Int64)"
    +    , "import Data.Word (Word8, Word16)"
    +    , "import Lens.Family2 ((.~), (&))"
    +    , "import TensorFlow.Build"
    +    , "import TensorFlow.BuildOp"
    +    , "import TensorFlow.Tensor"
    +    , "import TensorFlow.Types"
    +      ]
    +
    +renderDef :: OpDef -> Doc
    +renderDef d =
    +  stack [
    +      haddocks
    +    , n <+> "::" <+> hang 0 (typeSig d)
    +    , n <+> hang 0 args <+> "|" <+> funcGuard <+> "=" </>  -- args are indented
    +            -- the body needs to be indented wrt the name
    +            indent indentation functionBody
    +    , extras  -- just for debug
    +    ]
    +  where
    +    n = strictText $ fixOpName (d ^. name)
    +    args = sep $ [hsName | (_, hsName) <- mandatoryAttrs] ++ tensorArgs
    +    tensorArgs = [strictText $ lowCase (a ^. name) | a <- d ^. inputArg]
    +    fixOpName = lowCase
    +    funcGuard = "eqLengthGuard" <+> brackets (commasep entries)
    +      where
    +        entries =
    +            [ parens $ quotedText nAttr <> comma <+>
    +              brackets (commasep $ toList $
    +              NE.map renderTensorName tensorNames)
    +            | (nAttr, tensorNames) <- Map.toList $ numberAttrMap d
    +            ]
    +        renderTensorName x = parens $ quotedText x <> comma <+>
    +                             "length" <+> strictText x
    +    -- Uses hang 0 to align the argument vertically on multiple lines.
    +    functionBody = buildFunction <+> parens (hang 0 (stack buildOpParts))
    +                                 </> indent indentation (sep tensorArgs)
    +    buildFunction
    +        | null outputListsSizes = "buildOp"
    +        | otherwise = "buildListOp" <+> brackets (commasep outputListsSizes)
    +    outputListsSizes = [ strictText numberAttrName
    +                       | o <- d ^. outputArg
    +                       , let numberAttrName = o ^. numberAttr
    +                       , not (Text.null numberAttrName) &&
    +                         numberAttrName `Map.member` mandatoryAttrMap d
    +                       ]
    +    buildOpParts =
    +        "opDef" <+> quotedText (d ^. name) :
    +        -- Renders tensor arguments.
    +        [ "& opAttr" <+> quotedText tfName <+>
    +          ".~ tensorType (undefined ::" <+> strictText hsName <> ")"
    +        | (tfName, (hsName, _)) <- Map.toList typeMap
    +        ] ++
    +        -- Renders mandatory attributes as function parameters.
    +        [ "& opAttr" <+> dquotes tfName <+> ".~" <+> hsName
    +        | (tfName, hsName) <- mandatoryAttrs
    +        ] ++
    +        -- Renders sizes of tensor list types having number_attr.
    +        [ "& opAttr" <+> quotedText nAttr <+> ".~" <+>
    +          "(fromIntegral (length" <+> strictText (head tensorNames) <> ") :: Int64)"
    +        | (nAttr, tensorNames) <- Map.toList $ numberAttrMap d
    +        ]
    +    mandatoryAttrs = [(strictText tf, strictText hs)
    +                     | (tf, (hs, _, _)) <- Map.toList (mandatoryAttrMap d)
    +                     ]
    +    haddocks = "-- |" <+> multilineComment (d ^. summary) (d ^. description)
    +    extras = enclose "{-\n" "\n-}" $
    +             strictText $ Text.pack $
    +             showMessage ((def :: OpDef)
    +                          & inputArg .~ (d ^. inputArg)
    +                          & outputArg .~ (d ^. outputArg)
    +                          & attr .~ (d ^. attr))
    +    typeMap = opDefTypeMap d
    +
    +-- | Makes a quoted string doc out of the given text value.
    +quotedText :: Text.Text -> Doc
    +quotedText = dquotes . strictText
    +
    +-- | typeSig renders the type signature of the given OpDef.
    +typeSig :: OpDef -> Doc
    +typeSig d =
    +    foralls <+> constraints <+/>
    +    signatureFold (mandatoryAttrInputs ++ tensorInputs ++ [outputs])
    +  where
    +    foralls | Map.null typeMap = empty
    +            | otherwise =
    +              "forall"
    +              <+> sep (refTypes ++ map (strictText . fst) (Map.elems typeMap))
    +              <+> "."
    +    constraints | Map.null typeMap = empty
    +                | otherwise =
    +                  tuple (concatMap
    +                         (\(t, aDef) ->
    +                           "TensorType" <+> strictText t
    +                           : maybeToList (oneOfRestrictions aDef t))
    +                         (Map.elems typeMap)) <+> "=>"
    +    tensorInputs = zipWith tensorArg refTypes (d ^. inputArg)
    +    refTypes = map (\x -> "v" <> int x) [1..length (d ^. inputArg)]
    +    tensorArg refType arg = wrapArg refType arg <+>
    +                            hang 0 ("-- ^" <+> argComment arg)
    +    -- Argument type is a list of tensors if number_attr is set;
    +    -- otherwise it's a single Tensor.
    +    wrapArg refType arg =
    +        if Text.null (arg ^. numberAttr) then typ else brackets typ
    +      where typ = tensorType refType arg
    +    tensorType refType arg =
    +      "Tensor" <+> refType <+> maybe directType strictText indirectType
    +      where
    +        indirectType = fmap fst (Map.lookup (arg ^. typeAttr) typeMap)
    +        directType = dtTypeToDoc (arg ^. type')
    +    outputs =
    +      case d ^. outputArg of
    +        []  -> "ControlNode"
    +        [o] -> wrappedOutput o <+> "-- ^" <+> argComment o
    +        os  -> renderTupleResult os
    +    wrappedOutput = wrapArg "Value"
    +    -- Tuple result case is rendered differently to give
    +    -- individual elements their own comments.
    +    renderTupleResult os =
    +        stack $ [ tuple (map wrappedOutput os)
    +                , flatten commentSummary
    +                ] ++ map commentDetails os
    +      where
    +        commentSummary = "-- ^" <+> tuple [bold (o ^. name) | o <- os]
    +        commentDetails o =
    +          stack [ "--"
    +                , "-- *" <+> argComment o
    +                ]
    +    signatureFold = folddoc (\x y -> x </> "->" <+> y)
    +    mandatoryAttrInputs = [
    +      dtTypeToDoc dtType <+>
    +          hang 0 ("-- ^" <+> argComment' tfName descr)
    +      | (tfName, (_, dtType, descr)) <- Map.toList $ mandatoryAttrMap d]
    +    typeMap = opDefTypeMap d
    +
    +-- | Returns the type restriction for the given tensor type if the
    +-- set of allowed types is not empty (i.e. restricted).
    +oneOfRestrictions :: AttrDef -> Text -> Maybe Doc
    +oneOfRestrictions aDef tName = do
    +    typs <- onAttrType (^. templateRestrictions) aDef
    +    guard $ not $ null typs
    +    let typeList = commasep $ map strictText $
    +                   Set.toList $ Set.fromList $
    +                   map dtTypeToHaskell typs
    +    return $ "OneOf" <+> "'" <> brackets typeList <+> strictText tName
    +
    +-- | Identifies the attributes used as tensor cardinalities. In such
    +-- cases a list of tensors is supplied as an input_arg. The number of
    +-- such inputs is communicated as a separate opAttr.
    +-- The result key is TensorFlow attribute name and the value is the
    +-- tensor names which have number_attr set to the result key.
    +numberAttrMap :: OpDef -> Map.Map Text.Text (NonEmpty Text.Text)
    +numberAttrMap d = Map.fromListWith (Semigroup.<>) [
    +    (nAttr, replaceReservedName (inp ^. name) :| [])
    +    | inp <- d ^. inputArg
    +    , nAttr <- [inp ^. numberAttr]
    +    , not (Text.null nAttr)
    +    ]
    +
    +argComment :: OpDef'ArgDef -> Doc
    +argComment arg = argComment' (arg ^. name) (arg ^. description)
    +
    +argComment' :: Text.Text -> Text.Text -> Doc
    +argComment' argName argDesc =
    +    bold argName <> splitMultilineText (":" <+>) argDesc
    +
    +bold :: Text.Text -> Doc
    +bold n = strictText ("__" <> n <> "__")
    +
    +opDefTypeMap :: OpDef -> Map.Map Text.Text (Text.Text, AttrDef)
    +opDefTypeMap d =
    +    Map.fromList [(n, (lowCase n, a)) | (n, a) <- attrList d, isType a]
    +
    +attrList :: OpDef -> [(Text.Text, AttrDef)]
    +attrList d = [(a ^. name, attrDef a) | a <- d ^. attr]
    +
    +isType :: AttrDef -> Bool
    +isType = fromMaybe False . onAttrType (const True)
    +
    +-- | Applies the given function to the data type. Is this a Prism?
    +onAttrType :: (Template DataType -> a) -> AttrDef -> Maybe a
    +onAttrType f x = case x ^. attrTemplate of
    +    AttrSingle (AttrType a) -> Just (f a)
    +    _ -> Nothing
    +
    +-- | mandatoryAttrMap contains the attributes chosen by
    +-- isMandatoryAttr, excluding those which are derived from list of
    +-- tensor arguments. The key is the TF name of the attribute. The
    +-- value tuple is (haskell name, TF type, attribute comment).
    +mandatoryAttrMap :: OpDef -> Map.Map Text.Text (Text.Text, DataType, Text.Text)
    +mandatoryAttrMap d =
    +    Map.fromList [ (n, (lowCase n, dtType, a ^. attrOriginal.description))
    +                 | (n, a) <- attrList d
    +                 , Just dtType <- [isMandatoryAttr a]
    +                 -- Excludes the attributes rendered as list lengths.
    +                 , n `Map.notMember` numberAttrMap d
    +                 ]
    +
    +-- | Inspects the attribute and if it is one of the implemented
    +-- non-tensor values lacking default, then returns Just the TF type.
    +isMandatoryAttr :: AttrDef -> Maybe DataType
    +isMandatoryAttr x =
    +   case x ^. attrTemplate of
    +     AttrSingle (AttrBool y)  -> noDefault DT_BOOL y
    +     AttrSingle (AttrInt64 y) -> noDefault DT_INT64 y
    +     AttrSingle (AttrFloat y) -> noDefault DT_FLOAT y
    +     _ -> Nothing
    +   where
    +     noDefault typ y = maybe (Just typ) (const Nothing) (y ^. templateDefault)
    +
    +dtTypeToDoc :: DataType -> Doc
    +dtTypeToDoc = strictText . dtTypeToHaskell
    +
    +-- NOTE: The cases of this function should be kept in sync with
    +-- TensorFlow.Types.AllTensorTypes.
    +dtTypeToHaskell :: DataType -> Text.Text
    +dtTypeToHaskell DT_BOOL = "Bool"
    +dtTypeToHaskell DT_BFLOAT16 = "Data.Word.Word16"
    +dtTypeToHaskell DT_COMPLEX128 = "(Data.Complex.Complex Double)"
    +dtTypeToHaskell DT_COMPLEX64 = "(Data.Complex.Complex Float)"
    +dtTypeToHaskell DT_DOUBLE = "Double"
    +dtTypeToHaskell DT_FLOAT = "Float"
    +dtTypeToHaskell DT_INT16 = "Data.Int.Int16"
    +dtTypeToHaskell DT_INT32 = "Data.Int.Int32"
    +dtTypeToHaskell DT_INT64 = "Data.Int.Int64"
    +dtTypeToHaskell DT_INT8 = "Data.Int.Int8"
    +dtTypeToHaskell DT_QINT32 = "Data.Int.Int32"  -- TODO(gnezdo): make unique
    +dtTypeToHaskell DT_QINT8 = "Data.Word.Word8"  -- TODO(gnezdo): make unique
    +dtTypeToHaskell DT_QINT16 = "Data.Int.Int16"  -- TODO(gnezdo): make unique
    +dtTypeToHaskell DT_QUINT16 = "Data.Word.Word16"  -- TODO(gnezdo): make unique
    +dtTypeToHaskell DT_QUINT8 = "Data.Word.Word8"  -- TODO(gnezdo): make unique
    +dtTypeToHaskell DT_STRING = "Data.ByteString.ByteString"
    +dtTypeToHaskell DT_UINT16 = "Data.Word.Word16"
    +dtTypeToHaskell DT_HALF = "Data.Word.Word16"  -- TODO(gnezdo): make unique
    +dtTypeToHaskell DT_UINT8 = "Data.Word.Word8"
    +dtTypeToHaskell x =
    +    Text.pack $ "Unsupported type in dtTypeToHaskell: " ++ show x
    +
    +-- | haddockComment escapes TensorFlow doc strings into haddock.
    +-- TODO(gnezdo): deal with the markup.
    +haddockComment :: Text.Text -> Doc
    +haddockComment = strictText
    +
    +multilineComment :: Text.Text -> Text.Text -> Doc
    +multilineComment summary' detail =
    +    haddockComment summary' </>
    +    splitMultilineText insertParagraphAndComment detail
    +  where insertParagraphAndComment x = "--" </> "--" <+> x
    +
    +-- | Converts the given multi-line detail string into
    +-- a multi-line haddock. Applies the given lead to the
    +-- first line. Returns an empty document for empty detail.
    +splitMultilineText :: (Doc -> Doc) -> Text.Text -> Doc
    +splitMultilineText lead detail =
    +  case Text.lines detail of
    +    [] -> empty
    +    (l : ls) -> stack $ lead (haddockComment l)
    +                      : map (("--" <+>) . haddockComment) ls
    +
    +replaceReservedName :: Text -> Text
    +replaceReservedName n
    +    | n `Set.member` reservedKeywords = n <> "'"
    +    | otherwise = n
    +
+indentation :: Int
+indentation = 4
    +
    +reservedKeywords :: Set.Set Text
    +reservedKeywords = Set.fromList $
    +    -- Haskell2010 keywords:
    +    -- https://www.haskell.org/onlinereport/haskell2010/haskellch2.html#x7-180002.4
    +    -- We don't include keywords that are allowed to be variable names,
    +    -- in particular: "as", "forall", and "hiding".
    +    [ "case"
    +    , "class"
    +    , "data"
    +    , "default"
    +    , "deriving"
    +    , "do"
    +    , "else"
    +    , "foreign"
    +    , "if"
    +    , "import"
    +    , "in"
    +    , "infix"
    +    , "infixl"
    +    , "infixr"
    +    , "instance"
    +    , "let"
    +    , "module"
    +    , "newtype"
    +    , "of"
    +    , "then"
    +    , "type"
    +    , "where"
    +    ]
    +    ++  -- Nonstandard extensions
    +    [ "mdo"   -- RecursiveDo
    +    , "rec"   -- Arrows, RecursiveDo
    +    , "proc"  -- Arrows
    +    ]
    +
+
diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-opgen-0.1.0.0/src/hscolour.css
new file mode 100644
index 0000000..c15919e
--- /dev/null
+++ b/docs/haddock/tensorflow-opgen-0.1.0.0/src/hscolour.css
@@ -0,0 +1,5 @@
+.hs-keyglyph, .hs-layout {color: red;}
+.hs-keyword {color: blue;}
+.hs-comment, .hs-comment a {color: green;}
+.hs-str, .hs-chr {color: teal;}
+.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {}
diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-opgen-0.1.0.0/synopsis.png
new file mode 100644
index 0000000000000000000000000000000000000000..85fb86ec84907bcc86531dc82871948ff4d471fa
GIT binary patch
[11327 bytes of binary PNG data omitted]
diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt b/docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt
new file mode 100644
index 0000000..a81a0f9
--- /dev/null
+++ b/docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt
@@ -0,0 +1,48 @@
+-- Hoogle documentation, generated by Haddock
+-- See Hoogle, http://www.haskell.org/hoogle/
+
+
+-- | Code generation for TensorFlow operations.
+--
+-- Please see README.md
+@package tensorflow-opgen
+@version 0.1.0.0
+
+
+-- | Wrapping of TensorFlow attributes into Haskell entities.
+module TensorFlow.OpGen.AttrVal
+data AttrDef
+data AttrCase f
+AttrBytes :: (f ByteString) -> AttrCase f
+AttrInt64 :: (f Int64) -> AttrCase f
+AttrFloat :: (f Float) -> AttrCase f
+AttrBool :: (f Bool) -> AttrCase f
+AttrType :: (f DataType) -> AttrCase f
+AttrShape :: (f TensorShapeProto) -> AttrCase f
+
+-- | Type-reified representation of TensorFlow AttrDef. Initially limited
+-- to just the types in Op descriptors.
+data AttrTemplate
+AttrSingle :: (AttrCase Template) -> AttrTemplate
+AttrList :: (AttrCase []) -> AttrTemplate
+AttrTensor :: UnusedTensor -> AttrTemplate
+
+-- | Specifies the optional default value and a set of allowed values for
+-- the given type.
+data Template a
+attrDef :: OpDef'AttrDef -> AttrDef
+attrOriginal :: Lens' AttrDef OpDef'AttrDef
+attrTemplate :: Lens' AttrDef AttrTemplate
+templateDefault :: Lens' (Template a) (Maybe a)
+templateRestrictions :: Lens' (Template a) [a]
+
+
+-- | Rendering of TensorFlow operations as Haskell functions.
+module TensorFlow.OpGen
+data OpGenFlags
+OpGenFlags :: String -> String -> String -> OpGenFlags
+[outputFile] :: OpGenFlags -> String
+[prefix] :: OpGenFlags -> String
+[excludeList] :: OpGenFlags -> String
+docOpList :: OpGenFlags -> OpList -> Doc
+flagParser :: Parser OpGenFlags
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html
new file mode 100644
index 0000000..5e59fe3
--- /dev/null
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html
@@ -0,0 +1,16 @@
+TensorFlow.EmbeddingOps

    tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.EmbeddingOps

    Description

    Parallel lookups on the list of tensors.

    Synopsis

    Documentation

    embeddingLookup Source

    Arguments

    :: (TensorType a, OneOf `[Int64, Int32]` b, Num b) 
    => [Tensor v a]

A list of tensors which can be concatenated along dimension 0. Each Tensor must be appropriately sized for the mod partition strategy.

    -> Tensor Value b

A Tensor with type int32 or int64 containing the ids to be looked up in params. The ids are required to be flat on entry and have fewer than 2^31 entries.

    -> Build (Tensor Value a)

    A dense tensor with shape `shape(ids) + shape(params)[1:]`.

    Looks up ids in a list of embedding tensors.

This function is used to perform parallel lookups on the list of tensors in params. It is a generalization of gather, where params is interpreted as a partition of a larger embedding tensor.

The partition_strategy is "mod": each id is assigned to partition `p = id % len(params)`. For instance, 13 ids are split across 5 partitions as: `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`

The results of the lookup are concatenated into a dense tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
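As a hedged sketch of the API above (constant, vector, and the Shape constructor come from TensorFlow.Ops and TensorFlow.Types; the shapes and names are illustrative only), a lookup of rows 0 and 2 in a single 4x3 partition:

lookupRows :: Build (Tensor Value Float)
lookupRows = embeddingLookup params ids
  where
    params = [constant (Shape [4, 3]) [0 .. 11 :: Float]]  -- one partition of 4 rows
    ids    = vector [0, 2 :: Int32]                        -- flat ids, as required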

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html new file mode 100644 index 0000000..952f801 --- /dev/null +++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html @@ -0,0 +1,4 @@ +TensorFlow.Gradient

    tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Gradient

    Synopsis

    Documentation

    gradients Source

    Arguments

    :: (Num (Tensor v1 a), v1 ~ Value, GradientCompatible a) 
    => Tensor v1 a

    The output of the graph.

    -> [Tensor v2 a]

    Tensors for which gradients are computed.

    -> Build [Tensor Value a] 

    Gradient of y w.r.t. each element of xs.
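A minimal sketch, assuming the Num instance on Tensor Value Float that this package provides: for y = x*x at x = 3, the returned gradient should evaluate to 6.

dydx :: Build [Tensor Value Float]
dydx = gradients y [x]
  where
    x = scalar (3 :: Float)
    y = x * x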

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html
new file mode 100644
index 0000000..919f1fe
--- /dev/null
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html
@@ -0,0 +1,122 @@
+TensorFlow.Ops

    tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Ops

    Description

    This module contains definitions for some built-in TensorFlow operations.

    Note that certain, "stateful" ops like variable and assign return a + Build action (e.g., Build (Tensor Ref a) instead of a pure value; the + returned Tensors are always rendered in the current Build context. This + approach helps us avoid problems with inlining or common subexpression + elimination, by writing

    do
    +    v <- variable []
    +    w <- assign v 3
    +    render $ w * w

    instead of

    let
    +   v = variable []
    +   w = assign v 3
    +in w * w

since the latter could be reasonably transformed by the compiler into (or vice versa)

    let
    +   v = variable []
    +   w = assign v 3
    +   w' = assign v 3
    +in w * w'

Ops should return a Build action if their original OpDef marks them as stateful, or if they take any Refs as input. (This mirrors the rules that TensorFlow uses to avoid common subexpression elimination.)
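A speculative end-to-end sketch of the first snippet above, assuming runSession, build, and run from TensorFlow.Session, the Shape constructor from TensorFlow.Types, and a Fetchable instance producing a Data.Vector.Vector:

import qualified Data.Vector as V

squareOfAssigned :: IO (V.Vector Float)
squareOfAssigned = runSession $ do
    result <- build $ do
        v <- variable (Shape [])   -- scalar variable
        w <- assign v 3
        render $ w * w
    run result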

    Synopsis

    Documentation

    add

    Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns x + y element-wise.

*NOTE*: Add supports broadcasting. AddN does not. More about broadcasting here.

    abs

    Arguments

:: (TensorType t, OneOf `[Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes the absolute value of a tensor.

Given a tensor x, this operation returns a tensor containing the absolute value of each element in x. For example, if x is an input element and y is an output element, this operation computes \(y = |x|\).

    addN

    Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => [Tensor v1 t]

    inputs: Must all be the same size and shape.

    -> Tensor Value t

    sum

Add all input tensors element-wise.

    argMax

    Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    input

    -> Tensor v2 tidx

dimension: int32, 0 <= dimension < rank(input). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0.

    -> Tensor Value Int64

    output

    Returns the index with the largest value across dimensions of a tensor.

    assign :: forall a v. TensorType a => Tensor Ref a -> Tensor v a -> Build (Tensor Ref a) Source

    broadcastGradientArgs

    Arguments

:: (TensorType t, OneOf `[Int32, Int64]` t) 
    => Tensor v1 t

    s0

    -> Tensor v2 t

    s1

    -> (Tensor Value t, Tensor Value t)

    (r0, r1)

    • r0
    • r1

    Return the reduction indices for computing gradients of s0 op s1 with broadcast.

    This is typically used by gradient computations for a broadcasting operation.

    cast

    Arguments

    :: (TensorType dstT, TensorType srcT) 
    => Tensor v1 srcT

    x

    -> Tensor Value dstT

    y

    Cast x of type SrcT to y of DstT.

    concat

    Arguments

    :: TensorType t 
    => Tensor v1 Int32

concat_dim: 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)).

    -> [Tensor v2 t]

values: The N Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except concat_dim.

    -> Tensor Value t

output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except in concat_dim where it has the sum of the sizes.

    Concatenates tensors along one dimension.

    constant :: forall a. TensorType a => Shape -> [a] -> Tensor Value a Source

    Create a constant tensor.

The values should be in row-major order, e.g.,

element 0:   index (0, ..., 0)
element 1:   index (0, ..., 1)
...
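For instance (a hedged sketch; Shape comes from TensorFlow.Types), a 2x2 matrix is written as its flattened rows:

twoByTwo :: Tensor Value Float
twoByTwo = constant (Shape [2, 2]) [1, 2, 3, 4]   -- [[1, 2], [3, 4]]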

    initializedVariable :: forall a. TensorType a => Tensor Value a -> Build (Tensor Ref a) Source

Creates a variable initialized to the given value. Initialization happens the next time the session runs.

    zeroInitializedVariable :: (TensorType a, Num a) => Shape -> Build (Tensor Ref a) Source

    Creates a zero-initialized variable with the given shape.

    fill

    Arguments

    :: TensorType t 
    => Tensor v1 Int32

    dims: 1-D. Represents the shape of the output tensor.

    -> Tensor v2 t

    value: 0-D (scalar). Value to fill the returned tensor.

    -> Tensor Value t

    output

    Creates a tensor filled with a scalar value.

    This operation creates a tensor of shape dims and fills it with value.

    For example:

```prettyprint
# Output tensor has shape [2, 3].
fill([2, 3], 9) ==> [[9, 9, 9]
                     [9, 9, 9]]
```

    matMul

    Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t) 
    => Tensor v1 t

    a

    -> Tensor v2 t

    b

    -> Tensor Value t

    product

    Multiply the matrix "a" by the matrix "b".

The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transpose_b is true).

*Note*: The default kernel implementation for MatMul on GPUs uses cublas.

    matTranspose :: forall a v. TensorType a => Tensor v a -> Tensor Value a Source

    mul

    Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns x * y element-wise.

*NOTE*: Mul supports broadcasting. More about broadcasting here.

    neg

    Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes numerical negative value element-wise.

    I.e., \(y = -x\).

    pack

    Arguments

    :: TensorType t 
    => [Tensor v1 t]

    values: Must be of same shape and type.

    -> Tensor Value t

    output: The packed tensor.

    Packs a list of N rank-R tensors into one rank-`(R+1)` tensor.

Packs the N tensors in values into a tensor with rank one higher than each tensor in values, by packing them along the axis dimension. Given a list of tensors of shape `(A, B, C)`;

if `axis == 0` then the output tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the output tensor will have the shape `(A, N, B, C)`.
Etc.

    For example:

```prettyprint
# x is [1, 4]
# y is [2, 5]
# z is [3, 6]
pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
```

    This is the opposite of unpack.

    range

    Arguments

:: (TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 tidx

    start: 0-D (scalar). First entry in the sequence.

    -> Tensor v2 tidx

    limit: 0-D (scalar). Upper limit of sequence, exclusive.

    -> Tensor v3 tidx

    delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

    -> Tensor Value tidx

    output: 1-D.

    Creates a sequence of integers.

This operation creates a sequence of integers that begins at start and extends by increments of delta up to but not including limit.

    For example:

```
# start is 3
# limit is 18
# delta is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
```

    reducedShape :: (OneOf `[Int32, Int64]` t1, OneOf `[Int32, Int64]` t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Value Int32 Source

    Helper function for reduction ops (translation of math_ops.reduced_shape).

    relu

    Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    features

    -> Tensor Value t

    activations

    Computes rectified linear: `max(features, 0)`.

    reluGrad

    Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Tensor v1 t

    gradients: The backpropagated gradients to the corresponding Relu operation.

    -> Tensor v2 t

features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently).

    -> Tensor Value t

    backprops: `gradients * (features > 0)`.

    Computes rectified linear gradients for a Relu operation.

    reshape

    Arguments

:: (TensorType t, TensorType tshape, OneOf `[Int32, Int64]` tshape) 
    => Tensor v1 t

    tensor

    -> Tensor v2 tshape

    shape: Defines the shape of the output tensor.

    -> Tensor Value t

    output

    Reshapes a tensor.

Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.

If one component of shape is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a shape of `[-1]` flattens into 1-D. At most one component of shape can be -1.

If shape is 1-D or higher, then the operation returns a tensor with shape shape filled with the values of tensor. In this case, the number of elements implied by shape must be the same as the number of elements in tensor.

    For example:

```prettyprint
# tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor t has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]]

# tensor t is [[[1, 1], [2, 2]],
#              [[3, 3], [4, 4]]]
# tensor t has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                        [3, 3, 4, 4]]

# tensor t is [[[1, 1, 1],
#               [2, 2, 2]],
#              [[3, 3, 3],
#               [4, 4, 4]],
#              [[5, 5, 5],
#               [6, 6, 6]]]
# tensor t has shape [3, 2, 3]
# pass '[-1]' to flatten t
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]

# -1 can also be used to infer the shape

# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
                              [2, 2, 2],
                              [3, 3, 3]],
                             [[4, 4, 4],
                              [5, 5, 5],
                              [6, 6, 6]]]

# tensor t is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
```
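Since the shape argument of this binding is itself a tensor, a Haskell call mirrors the pseudo-code above; a hedged sketch that flattens its input:

flatten :: TensorType t => Tensor v t -> Tensor Value t
flatten t = reshape t (vector [-1 :: Int32])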

    restore Source

    Arguments

    :: TensorType a 
    => ByteString

    File path.

    -> Tensor Ref a

    Tensor to restore.

    -> Build ControlNode 

    Restore a tensor's value from a checkpoint file.

    restoreFromName Source

    Arguments

    :: TensorType a 
    => ByteString

    File path.

    -> ByteString

    Tensor name override.

    -> Tensor Ref a

    Tensor to restore.

    -> Build ControlNode 

    Restore a tensor's value from a checkpoint file.

This version allows restoring from a checkpoint file that uses a different tensor name than the variable.

    save Source

    Arguments

    :: TensorType a 
    => ByteString

    File path.

    -> [Tensor v a]

    Tensors to save.

    -> Build ControlNode 

    scalar :: forall a. TensorType a => a -> Tensor Value a Source

    Create a constant scalar.

    sign

    Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Returns an element-wise indication of the sign of a number.

`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.

    For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

    size

    Arguments

:: (TensorType t, TensorType out_type, OneOf `[Int32, Int64]` out_type) 
    => Tensor v1 t

    input

    -> Tensor Value out_type

    output

    Returns the size of a tensor.

This operation returns an integer representing the number of elements in input.

    For example:

```prettyprint
# t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```

    softmax

    Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    logits: 2-D with shape `[batch_size, num_classes]`.

    -> Tensor Value t

    softmax: Same shape as logits.

    Computes softmax activations.

    For each batch i and class j we have

    softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))

    softmaxCrossEntropyWithLogits

    Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
    => Tensor v1 t

    features: batch_size x num_classes matrix

    -> Tensor v2 t

labels: batch_size x num_classes matrix. The caller must ensure that each batch of labels represents a valid probability distribution.

    -> (Tensor Value t, Tensor Value t)

    (loss, backprop)

    • loss: Per example loss (batch_size vector).
    • backprop: backpropagated gradients (batch_size x num_classes matrix).

    Computes softmax cross entropy cost and gradients to backpropagate.

    Inputs are the logits, not probabilities.

    sparseToDense

    Arguments

:: (TensorType t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
    => Tensor v1 tindices

sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed.

    -> Tensor v2 tindices

    output_shape: 1-D. Shape of the dense output tensor.

    -> Tensor v3 t

sparse_values: 1-D. Values corresponding to each row of sparse_indices, or a scalar value to be used for all sparse indices.

    -> Tensor v4 t

default_value: Scalar value to set for indices not specified in sparse_indices.

    -> Tensor Value t

    dense: Dense output tensor of shape output_shape.

    Converts a sparse representation into a dense tensor.

    Builds an array dense with shape output_shape such that

```prettyprint
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)

# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]

# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```

All other values in dense are set to default_value. If sparse_values is a scalar, all sparse indices are set to this single value.

Indices should be sorted in lexicographic order, and indices must not contain any repeats. If validate_indices is true, these properties are checked during execution.

    sub

    Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns x - y element-wise.

*NOTE*: Sub supports broadcasting. More about broadcasting here.

    sum

    Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
    => Tensor v1 t

    input: The tensor to reduce.

    -> Tensor v2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Value t

    output: The reduced tensor.

    Computes the sum of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

    topK

    Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
    => Int64

k: Number of top elements to look for along the last dimension (along each row for matrices).

    -> Tensor v1 t

    input: 1-D or higher with last dimension at least k.

    -> (Tensor Value t, Tensor Value Int32)

    (values, indices)

    • values: The k largest elements along each last dimensional slice.
    • indices: The indices of values within the last dimension of input.

    Finds values and indices of the k largest elements for the last dimension.

If the input is a vector (rank-1), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the j-th largest entry in input, and its index is `indices[j]`.

For matrices (resp. higher rank input), computes the top k entries in each row (resp. vector along the last dimension). Thus,

    values.shape = indices.shape = input.shape[:-1] + [k]

    If two elements are equal, the lower-index element appears first.

    If k varies dynamically, use TopKV2 below.
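An illustrative call (the expected values follow from the semantics above; the name is ours): the two largest entries of a vector, with their indices:

top2 :: (Tensor Value Float, Tensor Value Int32)
top2 = topK 2 (vector [10, 40, 30 :: Float])
-- expected: values [40, 30], indices [1, 2]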

    transpose

    Arguments

:: (TensorType t, TensorType tperm, OneOf '[Int32, Int64] tperm)
    => Tensor v1 t

    x

    -> Tensor v2 tperm

    perm

    -> Tensor Value t

    y

    Shuffle dimensions of x according to a permutation.

The output y has the same rank as x. The shapes of x and y satisfy: `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
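For rank-2 tensors, `perm = [1, 0]` is the ordinary matrix transpose, which is exactly how matTranspose in TensorFlow.Ops is defined. A sketch with an illustrative name:

```haskell
transposed :: Tensor Value Float   -- result has shape [3, 2]
transposed = CoreOps.transpose
    (constant (Shape [2, 3]) [1, 2, 3, 4, 5, 6])
    (vector [1, 0 :: Int32])       -- perm
```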

truncatedNormal

    Arguments

    :: TensorType a 
    => Tensor v Int64

    Shape.

-> Build (Tensor Value a)

Random tensor from the unit normal distribution with bounded values.

variable :: forall a. TensorType a => Shape -> Build (Tensor Ref a)

    Create a new, uninitialized stateful Tensor of the given shape.

vector :: TensorType a => [a] -> Tensor Value a

    Create a constant vector.

zeros :: forall a. (Num a, TensorType a) => Shape -> Tensor Value a

Create a tensor of zeros with the given shape.

    zerosLike

    Arguments

    :: TensorType t 
    => Tensor v1 t

    x: a tensor of type T.

    -> Tensor Value t

    y: a tensor of the same shape and type as x but filled with zeros.

    Returns a tensor of zeros with the same shape and type as x.
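A short sketch contrasting the two (illustrative names):

```haskell
z1 :: Tensor Value Float
z1 = zeros (Shape [2, 3])     -- explicit shape
z2 :: Tensor Value Float
z2 = CoreOps.zerosLike z1     -- shape and type taken from z1
```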

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html
new file mode 100644
index 0000000..e9f2531
--- /dev/null
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html
@@ -0,0 +1,4 @@
+tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings. (Index)

    tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/frames.html b/docs/haddock/tensorflow-ops-0.1.0.0/frames.html
new file mode 100644
index 0000000..1b4e38d
(30 lines of generated Haddock frameset markup omitted)
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-ops-0.1.0.0/haddock-util.js
new file mode 100644
index 0000000..9a6fccf
(344 lines of stock Haddock JavaScript omitted: class toggling, collapsible sections, cookie helpers, index quick-search, frame handling, and the style menu)
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-ops-0.1.0.0/hslogo-16.png
new file mode 100644
(binary: Haskell logo, 1684 bytes)
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-ops-0.1.0.0/index-frames.html
new file mode 100644
(generated module-list frame)
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/index.html b/docs/haddock/tensorflow-ops-0.1.0.0/index.html
new file mode 100644
index 0000000..ef676e3
--- /dev/null
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/index.html
@@ -0,0 +1,4 @@
+tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

    tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

    Please see README.md

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-EmbeddingOps.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-EmbeddingOps.html
new file mode 100644
index 0000000..6bb59df
--- /dev/null
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-EmbeddingOps.html
@@ -0,0 +1,4 @@
+TensorFlow.EmbeddingOps

    TensorFlow.EmbeddingOps

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Gradient.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Gradient.html
new file mode 100644
index 0000000..d309aff
--- /dev/null
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Gradient.html
@@ -0,0 +1,4 @@
+TensorFlow.Gradient

    TensorFlow.Gradient

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html
new file mode 100644
index 0000000..bc6c10a
--- /dev/null
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html
@@ -0,0 +1,4 @@
+TensorFlow.Ops

    TensorFlow.Ops

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/minus.gif b/docs/haddock/tensorflow-ops-0.1.0.0/minus.gif
new file mode 100644
(binary: collapser icon, 56 bytes)
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/ocean.css b/docs/haddock/tensorflow-ops-0.1.0.0/ocean.css
new file mode 100644
(600 lines of the stock Haddock "Ocean" stylesheet omitted)
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/plus.gif b/docs/haddock/tensorflow-ops-0.1.0.0/plus.gif
new file mode 100644
(binary: expander icon, 59 bytes)
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-EmbeddingOps.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-EmbeddingOps.html
new file mode 100644
index 0000000..e97c085
--- /dev/null
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-EmbeddingOps.html
@@ -0,0 +1,87 @@
+-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE ConstraintKinds #-}
    +{-# LANGUAGE DataKinds #-}
    +{-# LANGUAGE NoMonomorphismRestriction #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE RankNTypes #-}
    +
    +-- | Parallel lookups on the list of tensors.
    +module TensorFlow.EmbeddingOps where
    +
    +import Control.Monad (zipWithM)
    +import Data.Int (Int32, Int64)
    +import Data.List (genericLength)
    +import TensorFlow.Build (Build, colocateWith, render)
    +import TensorFlow.Ops ()  -- Num instance for Tensor
    +import TensorFlow.Tensor (Tensor, Value)
    +import TensorFlow.Types (OneOf, TensorType)
    +import qualified TensorFlow.GenOps.Core as CoreOps
    +
    +-- | Looks up `ids` in a list of embedding tensors.
    +--
    +-- This function is used to perform parallel lookups on the list of
    +-- tensors in `params`.  It is a generalization of `TF.gather`, where
    +-- `params` is interpreted as a partition of a larger embedding
    +-- tensor.
    +--
+-- The partition_strategy is "mod": we assign each id to partition
    +-- `p = id % len(params)`. For instance,
    +-- 13 ids are split across 5 partitions as:
    +-- `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
    +--
    +-- The results of the lookup are concatenated into a dense
    +-- tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
    +embeddingLookup :: forall a b v .
    +                   ( TensorType a
    +                   , OneOf '[Int64, Int32] b
    +                   , Num b
    +                   )
    +                => [Tensor v a]
    +                -- ^ A list of tensors which can be concatenated along
    +                -- dimension 0. Each `Tensor` must be appropriately
    +                -- sized for `mod` partition strategy.
    +                -> Tensor Value b
    +                -- ^ A `Tensor` with type `int32` or `int64`
    +                -- containing the ids to be looked up in `params`.
    +                -- The ids are required to be flat on entry and have
    +                -- fewer than 2^31 entries.
    +                -> Build (Tensor Value a)
    +                -- ^ A dense tensor with shape `shape(ids) + shape(params)[1:]`.
    +embeddingLookup params ids =
    +    CoreOps.dynamicStitch pindices <$> partitionedResult
    +  where np = genericLength params
    +        pAssignments = CoreOps.cast (ids `CoreOps.mod` np)
    +        newIds = ids `CoreOps.div` np
    +        originalIndices = CoreOps.range 0 (CoreOps.size ids) 1
    +        -- Partition list of ids based on assignments into np separate lists
    +        gatherIds = CoreOps.dynamicPartition np newIds pAssignments
    +        -- Similarly, partition the original indices.
    +        pindices = CoreOps.dynamicPartition np originalIndices pAssignments
    +        -- Do np separate lookups, finding embeddings for plist[p] in params[p]
    +        partitionedResult = zipWithM
    +                            (\p g -> colocateWith p $ render $ CoreOps.gather p g)
    +                            params gatherIds
    +
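[Editor's note, not part of the generated page: a hedged usage sketch. With three shards, id i is served by shard i `mod` 3, so ids 0 and 4 are looked up in shards 0 and 1; `lookupExample` is an illustrative name.]

```haskell
lookupExample :: [Tensor Value Float]        -- three embedding shards
              -> Build (Tensor Value Float)
lookupExample shards = embeddingLookup shards (vector [0, 4 :: Int32])
```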
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Gradient.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Gradient.html
new file mode 100644
index 0000000..9f35a43
--- /dev/null
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Gradient.html
@@ -0,0 +1,708 @@
+-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE ConstraintKinds #-}
    +{-# LANGUAGE DataKinds #-}
    +{-# LANGUAGE FlexibleContexts #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE RankNTypes #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +{-# LANGUAGE TypeFamilies #-}
    +{-# LANGUAGE ViewPatterns #-}
    +
    +module TensorFlow.Gradient
    +    ( gradients
    +    ) where
    +
    +import Control.Monad (forM, zipWithM)
    +import Control.Monad.State.Strict (State, evalState, gets, modify)
    +import Data.ByteString (ByteString)
    +import Data.Complex (Complex)
    +import Data.Default (def)
    +import Data.Int (Int32, Int64)
    +import Data.List (foldl', sortBy)
    +import Data.Map.Strict (Map)
    +import Data.Maybe (fromMaybe, maybeToList, mapMaybe)
    +import Data.Ord (comparing)
    +import Data.ProtoLens.TextFormat (showMessage)
    +import Data.Set (Set)
    +import Data.Text (Text)
    +import Data.Tuple (swap)
    +import Lens.Family2 (Lens', (&), (^.), (.~), (%~))
    +import Lens.Family2.State.Strict (uses)
    +import Lens.Family2.Stock (at, intAt)
    +import Lens.Family2.Unchecked (lens, iso)
    +import Prelude hiding (sum)
    +import Text.Printf (printf)
    +import qualified Data.Graph.Inductive.Basic as FGL
    +import qualified Data.Graph.Inductive.Graph as FGL
    +import qualified Data.Graph.Inductive.PatriciaTree as FGL
    +import qualified Data.Graph.Inductive.Query.DFS as FGL
    +import qualified Data.IntMap.Strict as IntMap
    +import qualified Data.Map.Strict as Map
    +import qualified Data.Set as Set
    +import qualified Data.Text as Text
    +
    +import qualified TensorFlow.GenOps.Core as CoreOps
    +import TensorFlow.Build
    +    ( Build
    +    , render
    +    , renderNodeName
    +    , renderedNodeDefs
    +    , opDef
    +    , opAttr
    +    )
    +import TensorFlow.BuildOp
    +import TensorFlow.Ops
    +    ( addN
    +    , broadcastGradientArgs
    +    , expandDims
    +    , fill
    +    , matMul
    +    , reducedShape
    +    , reluGrad
    +    , reshape
    +    , scalar
    +    , shape
    +    , softmaxCrossEntropyWithLogits
    +    , sum
    +    , vector
    +    , zerosLike
    +    )
    +import TensorFlow.Output
    +    ( NodeName(..)
    +    , Op (Rendered)
    +    , Output(..)
    +    , OutputIx(..)
    +    , outputIndex
    +    )
    +import TensorFlow.Tensor
    +    ( Tensor(..)
    +    , TensorKind (ValueKind)
    +    , Value
    +    , tensorOutput
    +    , tensorAttr
    +    )
    +import TensorFlow.Types (OneOf, TensorType, attrLens)
    +import Proto.Tensorflow.Core.Framework.NodeDef
    +    (NodeDef, attr, input, op, name)
    +
    +type GradientCompatible a =
    +    -- TODO(fmayle): MaxPoolGrad doesn't support Double for some reason.
    +    (Num a, OneOf '[ Float, Complex Float, Complex Double ] a)
    +
    +-- TODO(fmayle): Support control flow.
    +-- TODO(fmayle): Support gate_gradients-like option to avoid race conditions.
    +-- TODO(fmayle): Do we need to consider control inputs? See _PendingCount in
    +-- tensorflow/python/ops/gradients.py.
    +-- TODO(fmayle): Maybe store the gradient functions and numOutputs on the OpDef.
    +
    +
    +-- | Gradient of @y@ w.r.t. each element of @xs@.
    +gradients :: forall a v1 v2 . ( Num (Tensor v1 a)
    +                                -- TODO(gnezdo): remove indirect constraint.
    +                               -- It's a wart inherited from Num instance.
    +                              , v1 ~ Value
    +                              , GradientCompatible a
    +                              )
    +          => Tensor v1 a  -- ^ The output of the graph.
    +          -> [Tensor v2 a]  -- ^ Tensors for which gradients are computed.
    +          -> Build [Tensor Value a]
    +gradients y xs = do
    +    -- The gradients are computed using "reverse accumulation", similarly to
    +    -- what is described here:
    +    -- https://en.wikipedia.org/wiki/Automatic_differentiation#The_chain_rule.2C_forward_and_reverse_accumulation
    +    --
    +    -- The code is summarised as follows:
    +    --
    +    -- 1. Create an fgl graph of the relevant nodes (ops) and edges (tensors).
    +    -- 2. Initialize the gradient of y to 1 (∂y/∂y = 1) and the rest of tensor's
    +    --    gradients to nothing.
    +    -- 3. Process the nodes in reverse topological order (i.e. each node comes
    +    --    after all of its outputs so that the output gradients for a node have
    +    --    been completely calculated before it is processed):
    +    --      a. Record the gradient for each of the node's output tensors (∂y/∂w
    +    --         for each output tensor w).
    +    --      b. Calculate the gradient of y w.r.t. each of the node's input
    +    --         tensors using the gradients of the node's output tensors.
    +    --
    +    --         Written differently, for each output tensor w and input tensor v:
    +    --           ∂y/∂w = ...            (calculated in previous steps)
    +    --           ∂w/∂v = ...            (op specific)
    +    --           ∂y/∂v = ∂y/∂w * ∂w/∂v  (technically, if tensor v is an input
    +    --                                   to multiple nodes, then this is only
    +    --                                   part of ∂y/∂v)
    +    --
    +    -- 4. Lookup the recorded gradient for each x in xs.
    +
    +    yName <- renderNodeName y
    +    -- TODO(fmayle): Move this into Build.hs and call it unsafeNodeDefFromName?
    +    nodeDefLookup :: (NodeName -> NodeDef) <- uses renderedNodeDefs $
    +        (\f x -> fromMaybe (error $ "no NodeDef found for " ++ show x) (f x))
    +        . flip Map.lookup
    +    let (gr, nodeMap) = createGraph yName nodeDefLookup
    +    -- Set gradient of y to one.
    +    let initPending :: Map.Map FGL.Node (PendingGradients a)
    +        initPending = Map.empty & at (nodeMap Map.! yName)
    +                                . nonEmpty
    +                                . outputIxAt (y ^. tensorOutput . outputIndex)
    +                                . nonEmpty
    +                                .~ [fill (shape y) (scalar 1)]
    +    -- Calculate the gradients of y w.r.t. each node in the graph.
    +    gradientMap <- graphGrads gr initPending
    +    -- Lookup the gradients for each x.
    +    forM xs $ \x -> do
    +        xName <- renderNodeName x
    +        render $ fromMaybe (zerosLike x) $ do
    +            n <- nodeMap ^. at xName
    +            let i = x ^. tensorOutput . outputIndex
    +            gradientMap ^. at n . nonEmpty . outputIxAt i
    +
    +outputIxAt :: OutputIx -> Lens' (IntMap.IntMap v) (Maybe v)
    +outputIxAt = intAt . unOutputIx
    +
    +-- | Incomplete gradients of a node's outputs.
    +--
    +-- The lists represent partial sums. The key is an OutputIx sans newtype.
    +type PendingGradients a = IntMap.IntMap [Tensor Value a]
    +
    +-- | Gradients of a node's outputs. The key is an OutputIx sans newtype.
    +type Gradients a = IntMap.IntMap (Tensor Value a)
    +
    +-- | Graph of TensorFlow operations.
    +type Graph = FGL.Gr NodeDef EdgeLabel
    +
    +-- | Data associated with an edge.
    +--
    +-- Pair of
    +--   1. Output index of a tensor from the source node.
    +--   2. Input index that the tensor connects to on the destination node.
    +type EdgeLabel = (OutputIx, OutputIx)
    +
    +
    +-- | State used for calculating gradients.
    +data GradientsState a = GradientsState
    +                      { _gradientsPending :: !(Map FGL.Node (PendingGradients a))
    +                      , _gradientsResult  :: !(Map FGL.Node (Gradients a))
    +                      }
    +
    +gradientsPending :: Lens' (GradientsState a) (Map FGL.Node (PendingGradients a))
    +gradientsPending = lens _gradientsPending (\x y -> x { _gradientsPending = y })
    +
    +gradientsResult :: Lens' (GradientsState a) (Map FGL.Node (Gradients a))
    +gradientsResult = lens _gradientsResult (\x y -> x { _gradientsResult = y })
    +
    +
    +-- TODO(fmayle): Use something like Data.List.Safe.
    +-- | Safe version of (!!).
    +safeIndex :: [a] -> Int -> Maybe a
    +_      `safeIndex` n | n < 0 = Nothing
    +[]     `safeIndex` _         = Nothing
    +(x:_)  `safeIndex` 0         = Just x
    +(_:xs) `safeIndex` n         = xs `safeIndex` (n-1)
    +
    +-- Copy of http://hackage.haskell.org/package/lens-3.9.0.2/docs/Control-Lens-Iso.html#v%3anon
    +anon :: a -> (a -> Bool) -> Lens' (Maybe a) a
    +anon a p = iso (fromMaybe a) go where
    +  go b | p b       = Nothing
    +       | otherwise = Just b
    +
    +non :: Eq a => a -> Lens' (Maybe a) a
    +non a = anon a (a==)
    +
    +-- | Lens that defaults Nothing to mempty.
    +nonEmpty :: (Monoid (t v), Foldable t) => Lens' (Maybe (t v)) (t v)
    +nonEmpty = anon mempty null
    +
    +-- | Calculate the gradients for every node in a graph.
    +graphGrads :: forall a. GradientCompatible a
    +           => Graph
    +           -> Map FGL.Node (PendingGradients a)
    +           -- ^ Initial gradients (usually just 1 for the node of interest).
    +           -> Build (Map FGL.Node (Gradients a))
    +graphGrads gr initPending = pure (foldl' go initState nodeOrder ^. gradientsResult)
    +  where
    +    initState = GradientsState initPending Map.empty
    +    -- Reverse topological sort.
    +    -- TODO(fmayle): Filter out nodes that are not successors of any x in xs to
    +    -- avoid calculating gradients that won't be used.
    +    nodeOrder = FGL.topsort $ FGL.grev gr
    +    go state node =
    +        -- Aggregate the accumulated gradients for this node.
    +        let outputGrads =
    +                sumPendingGradient (state ^. gradientsPending . at node . nonEmpty)
    +        in if null outputGrads
    +           then state
    +           else
    +              -- Calculate the gradients for each of the node's inputs.
    +              let nextState = state & gradientsResult %~ Map.insert node outputGrads
    +                  ctx = FGL.context gr node
    +              in updatePendingGradients
    +                 ctx
    +                 (calculateInputGrads ctx outputGrads gr)
    +                 nextState
    +
    +-- | Reduce accumulated gradients for each output to one Tensor.
    +sumPendingGradient :: GradientCompatible a
    +                   => PendingGradients a -> Gradients a
    +sumPendingGradient = IntMap.mapMaybe f
    +  where
    +    f [] = Nothing
    +    f [x] = Just x
    +    f xs = Just (addN xs)
    +
    +
    +-- | Calculate the gradients of a node's input tensors.
    +--
    +-- This is mostly just a wrapper around opGrad.
    +calculateInputGrads :: forall a. GradientCompatible a
    +                    => FGL.Context NodeDef EdgeLabel
    +                    -> Gradients a  -- ^ Output gradients of the node.
    +                    -> Graph
    +                    -> [Maybe (Tensor Value a)]
    +calculateInputGrads (inputEdges, _, nodeDef, _) outputGrads gr =
    +    opGrad (nodeDef ^. op) nodeDef inputTensors fullOutGrads
    +  where
    +    fullOutGrads =
    +        fullOutputGrads (numOutputs nodeDef) (Rendered nodeDef) outputGrads
    +    -- Create a tensor from an edge (technically an Output, but it seems less
    +    -- confusing to refer to it as a tensor here).
    +    edgeToTensor :: (EdgeLabel, FGL.Node) -> Output
    +    edgeToTensor ((i, _), n) =
    +        case FGL.lab gr n of
    +            Just edgeNodeDef -> Output i (Rendered edgeNodeDef)
    +            Nothing -> error $ "calculateInputGrads: missing input node for "
    +                               ++ Text.unpack (nodeDef ^. name)
    +    -- Input tensors, sorted by input index.
    +    inputTensors = map edgeToTensor $ sortBy (comparing (snd . fst)) inputEdges
    +
    +-- | Convert a Map of gradients to a list, with zeros for missing outputs.
    +fullOutputGrads :: (TensorType a, Num a)
    +                => OutputIx  -- ^ Number of outputs.
    +                -> Op
    +                -> Gradients a
    +                -> [Tensor Value a]
    +fullOutputGrads n o gs =
    +    map (\i -> fromMaybe (zero i) (gs ^. outputIxAt i)) [0..n-1]
    +  where
    +    -- A tensor of zeros with the same shape as the i'th output.
    +    zero i = zerosLike $ toT (Output i o)
    +
    +
    +-- | Update the pending gradients of a node's inputs.
    +updatePendingGradients :: forall a. (TensorType a, Num a)
    +                       => FGL.Context NodeDef EdgeLabel
    +                       -> [Maybe (Tensor Value a)]
    +                       -- ^ Gradient of each input tensor.
    +                       -> GradientsState a
    +                       -> GradientsState a
    +updatePendingGradients (inputEdges, _, nodeDef, _) inputGrads initState =
    +    foldl' go initState inputEdges
    +  where
    +    go :: GradientsState a -> (EdgeLabel, FGL.Node) -> GradientsState a
    +    go state ((outIndex, OutputIx inIndex), node) =
    +        case maybeGradient of
    +            Nothing -> state
    +            Just g ->
    +                -- Add to the list of pending gradients for this tensor.
    +                state & gradientsPending
    +                      . at node
    +                      . nonEmpty
    +                      . outputIxAt outIndex
    +                      . nonEmpty
    +                      %~ (g:)
    +      where
    +        badSizeErr = error $ printf "updatePendingGradients: bad input index \
    +                                    \%d for inputGrads of length %d in %s"
    +                                    inIndex (length inputGrads)
    +                                    (show (nodeDef ^. name))
    +        maybeGradient = fromMaybe badSizeErr (safeIndex inputGrads inIndex)
    +
    +
    +-- | Create a graph that includes a node and its transitive dependencies.
    +createGraph :: NodeName -> (NodeName -> NodeDef)
    +            -> (Graph, Map NodeName FGL.Node)
    +createGraph nodeName nodeDefLookup = (FGL.nmap nodeDefLookup graph, nodeMap)
    +  where
    +    -- Parse a tensor name.
    +    parseTensorName :: Text -> Maybe (NodeName, OutputIx)
    +    parseTensorName n
    +        | Text.null n        = error "parseTensorName: empty name"
    +        | Text.head n == '^' = Nothing  -- Control edge
    +        | otherwise          =
    +            let (nm, indexStr) = Text.breakOn ":" n
    +                index | Text.null indexStr = 0
    +                      | otherwise = read $ Text.unpack $ Text.tail indexStr
    +            in Just (NodeName nm, OutputIx index)
    +
    +    -- Build a map from node name to outward edges.
    +    --
    +    -- The state is the set of visited nodes.
    +    collect :: Maybe (NodeName, OutputIx, OutputIx)
    +            -> NodeName
    +            -> State (Set NodeName)
    +                     (Map NodeName [(NodeName, OutputIx, OutputIx)])
    +    collect outgoingEdge nm = do
    +        let nextLookup = Map.singleton nm (maybeToList outgoingEdge)
    +        seen <- gets (Set.member nm)
    +        modify (Set.insert nm)
    +        if seen
    +            then pure nextLookup
    +            else do
    +                let inputs = nodeDefLookup nm ^. input
    +                    recurse inIndex (parentName, outIndex) =
    +                        collect (Just (nm, outIndex, inIndex)) parentName
    +                subEdgeLookups <-
    +                    zipWithM recurse [0..] $ mapMaybe parseTensorName inputs
    +                pure $ Map.unionsWith (++) (nextLookup:subEdgeLookups)
    +
    +    edgeLookup = evalState (collect Nothing nodeName) Set.empty
    +    -- Associate an ID with each node name.
    +    nodeMap = Map.fromList $ zip (Map.keys edgeLookup) [0..]
    +    -- Create the graph.
    +    graph = FGL.mkGraph (swap <$> Map.toList nodeMap)
    +                        [ (nodeMap Map.! n, nodeMap Map.! m, (i, j))
    +                        | (n, edges) <- Map.toList edgeLookup
    +                        , (m, i, j) <- edges
    +                        ]
    +
    +-- | Function to compute the gradient of y w.r.t. each input.
    +--
    +-- Let y be an arbitrary tensor
    +-- and [w_0, ..., w_n] be the output tensors of a node
    +-- and [v_0, ..., v_n] be the input tensors of the same node.
    +--
    +-- Given [∂y/∂w_0, ..., ∂y/∂w_n] and [v_0, ..., v_n], a GradientFunc computes
    +-- [∂y/∂v_0, ..., ∂y/∂v_n] for a particular op type.
    +--
    +-- A Nothing gradient is equivalent to zero (but allows for short circuiting
    +-- computation when all the gradients for something are Nothing).
    +type GradientFunc a = NodeDef
    +                    -> [Output]
    +                    -- ^ Input tensors.
    +                    -> [Tensor Value a]
    +                    -- ^ Gradient of y w.r.t. each output tensor.
    +                    -> [Maybe (Tensor Value a)]
    +                    -- ^ Gradient of y w.r.t. each input tensor.
    +
    +
    +-- TODO(fmayle): Assert the type is correct.
    +-- | Create a Tensor from an Output.
    +toT :: Output -> Tensor Value a
    +toT = Tensor ValueKind
    +
    +-- | The gradient function for an op type.
    +--
    +-- These implementations should match their python counterparts in:
    +-- third_party/tensorflow/python/ops/*_grad.py
    +opGrad :: forall a . GradientCompatible a => Text -> GradientFunc a
    +
    +opGrad "Abs" _ [toT -> x] [dz] = [Just $ dz * signum x]
    +opGrad "Neg" _ [_] [dz] = [Just $ -dz]
    +opGrad "Relu" _ [toT -> x] [dz] = [Just $ reluGrad dz x]
    +
    +opGrad "Square" _ [toT -> x] [dz] =
    +    -- TODO(fmayle): Handle complex numbers.
    +    -- TODO(fmayle): The python code makes dz a control dependency of the 2*x
    +    -- (for performance reasons?). Will need to put these functions in the Build
    +    -- monad to replicate that.
    +    [Just $ dz * (2 * x)]
    +
    +opGrad "Gather" _ [toT -> x, toT -> indices] [dz] =
    +    -- TODO(fmayle): The python version uses a better performance implementation
    +    -- when the shape is known without having to run the graph.
    +    -- TODO(fmayle): We shouldn't convert the result to a dense tensor. Sparse
    +    -- tensor support will require some thinking.
    +    [ Just $ CoreOps.unsortedSegmentSum values indices' numRows
    +    , Nothing
    +    ]
    +  where
    +    -- TODO(gnezdo): Use colocateWith but it requires Build monad.
    +    denseShape = shape (x :: Tensor Value a)
    +    numRows = CoreOps.slice denseShape 0 (1 :: Tensor Value Int32)
    +    valuesShape = CoreOps.concat 0 [
    +                                 allDimensions
    +                               , CoreOps.slice denseShape 1 (-1 :: Tensor Value Int32)
    +                               ]
    +    values = reshape dz valuesShape
    +    -- TODO(fmayle): This could be either Int32 or Int64.
    +    indices' = reshape indices allDimensions :: Tensor Value Int32
    +
    +opGrad "Max" _ [toT -> x, toT -> indices] [dz] =
    +    [Just $ indicators `CoreOps.div` numSelected * dz', Nothing]
    +  where
    +    sx = shape (x :: Tensor Value a)
    +    outputShapeKeptDims = reducedShape sx (indices :: Tensor Value Int32)
    +    x' = reshape x outputShapeKeptDims
    +    dz' = reshape dz outputShapeKeptDims
    +    indicators = CoreOps.cast $ CoreOps.equal x' x
    +    numSelected = reshape (sum indicators indices) outputShapeKeptDims
    +
    +-- Min and Max have identical gradient implementations.
    +opGrad "Min" u v w = opGrad "Max" u v w
    +
    +opGrad "Sum" _ [toT -> x, toT -> indices] [dz] =
    +    [ Just $ CoreOps.tile grad tileScaling, Nothing ]
    +  where
    +    -- TODO(gnezdo): Implement the fast-path from math_grad._SumGrad.
    +    sx = shape (x :: Tensor Value a)
    +    outputShapeKeptDims = reducedShape sx (indices :: Tensor Value Int32)
    +    tileScaling = safeShapeDiv sx outputShapeKeptDims
    +    grad = reshape dz outputShapeKeptDims
    +
    +opGrad "Mean" u v@[toT -> x, _] w =
    +    [Just $ dz `CoreOps.div` CoreOps.cast factor, Nothing]
    +  where
    +    [Just dz, Nothing] = opGrad "Sum" u v w
    +    inputShape = shape (x :: Tensor Value a)
    +    outputShape = shape (dz :: Tensor Value a)
    +    -- TODO(fmayle): Add fast path when shape is known.
    +    inputSize = CoreOps.prod inputShape $ rangeOfRank inputShape
    +    outputSize = CoreOps.prod outputShape $ rangeOfRank outputShape
    +    factor = safeShapeDiv inputSize outputSize
    +
    +opGrad "Add" _ [toT -> x, toT -> y] [dz] =
    +    [ Just $ reshape (sum dz rx) sx
    +    , Just $ reshape (sum dz ry) sy ]
    +  where
    +    sx = shape (x :: Tensor Value a)
    +    sy = shape (y :: Tensor Value a)
    +    (rx, ry) = broadcastGradientArgs sx sy
    +
    +opGrad "Sub" u v w =
    +    [Just x, Just (-y)]
    +  where
    +    [Just x, Just y] = opGrad "Add" u v w
    +
    +opGrad "SoftmaxCrossEntropyWithLogits" _ [toT -> x, toT -> y] [dz, _] =
    +    [ Just $ expandDims dz (-1) * snd (softmaxCrossEntropyWithLogits x y)
    +    , Nothing ]
    +
    +opGrad "Mul" _ [toT -> x, toT -> y] [dz] =
    +    -- TODO(fmayle): Handle complex numbers.
    +    [ Just $ reshape (sum (dz * y) rx) sx
    +    , Just $ reshape (sum (x * dz) ry) sy ]
    +  where
    +    sx = shape (x :: Tensor Value a)
    +    sy = shape (y :: Tensor Value a)
    +    (rx, ry) = broadcastGradientArgs sx sy
    +
    +opGrad "Div" _ [toT -> x, toT -> y] [dz] =
    +    -- TODO(fmayle): Handle complex numbers.
    +    -- TODO(gnezdo): Provide Fractional instance and use '/' instead of div.
    +    [ Just $ reshape (sum (dz `CoreOps.div` y) rx) sx
    +    , Just $ reshape (sum (dz * (negate x `CoreOps.div` (y * y))) ry) sy
    +    ]
    +  where
    +    sx = shape (x :: Tensor Value a)
    +    sy = shape (y :: Tensor Value a)
    +    (rx, ry) = broadcastGradientArgs sx sy
    +
    +opGrad "MatMul" nodeDef [toT -> x, toT -> y] [dz] =
    +    let transposeA = lookupAttr nodeDef "transpose_a"
    +        transposeB = lookupAttr nodeDef "transpose_b"
    +        transAttrs a b =
    +            (tensorAttr "transpose_a" .~ a) . (tensorAttr "transpose_b" .~ b)
    +    in case (transposeA, transposeB) of
    +       (False, False) ->
    +           [ Just $ (dz `matMul` y) & transAttrs False True
    +           , Just $ (x `matMul` dz) & transAttrs True False ]
    +       (False, True) ->
    +           [ Just $ dz `matMul` y
    +           , Just $ (x `matMul` dz) & transAttrs True False ]
    +       (True, False) ->
    +           [ Just $ (dz `matMul` y) & transAttrs False True
    +           , Just $ x `matMul` dz ]
    +       (True, True) ->
    +           [ Just $ (dz `matMul` y) & transAttrs True True
    +           , Just $ (x `matMul` dz) & transAttrs True True ]
    +
    +opGrad "Transpose" _ [_, toT -> p] [dz] =
    +    [ Just $ CoreOps.transpose dz
    +            (CoreOps.invertPermutation p :: Tensor Value Int32)
    +    , Nothing
    +    ]
    +
    +opGrad "Conv2D" nodeDef [toT -> x, toT -> y] [dz] =
    +    [ Just $ CoreOps.conv2DBackpropInput (shape x) y dz
    +          & tensorAttr "strides" .~ strides
    +          & tensorAttr "padding" .~ padding
    +          & tensorAttr "use_cudnn_on_gpu" .~ useCudnnOnGpu
    +          & tensorAttr "data_format" .~ dataFormat
    +    , Just $ CoreOps.conv2DBackpropFilter x (shape y) dz
    +          & tensorAttr "strides" .~ strides
    +          & tensorAttr "padding" .~ padding
    +          & tensorAttr "use_cudnn_on_gpu" .~ useCudnnOnGpu
    +          & tensorAttr "data_format" .~ dataFormat
    +    ]
    +  where
    +    strides = lookupAttr nodeDef "strides" :: [Int64]
    +    padding = lookupAttr nodeDef "padding" :: ByteString
    +    useCudnnOnGpu = lookupAttr nodeDef "use_cudnn_on_gpu" :: Bool
    +    dataFormat = lookupAttr nodeDef "data_format" :: ByteString
    +
    +opGrad "MaxPool" nodeDef [toT -> x] [dz] =
    +    [ Just $ CoreOps.maxPoolGrad x output dz
    +          & tensorAttr "ksize" .~ ksize
    +          & tensorAttr "strides" .~ strides
    +          & tensorAttr "padding" .~ padding
    +          & tensorAttr "data_format" .~ dataFormat
    +    ]
    +  where
    +    output :: Tensor Value a
    +    output = toT $ Output 0 (Rendered nodeDef)
    +    ksize = lookupAttr nodeDef "ksize" :: [Int64]
    +    strides = lookupAttr nodeDef "strides" :: [Int64]
    +    padding = lookupAttr nodeDef "padding" :: ByteString
    +    dataFormat = lookupAttr nodeDef "data_format" :: ByteString
    +
    +opGrad "Reshape" _ [toT -> x, _] [dz] =
    +    [Just $ reshape dz $ shape (x :: Tensor Value a), Nothing]
    +
    +opGrad "OneHot" _ _ _ = [Nothing, Nothing, Nothing, Nothing]
    +opGrad "TruncatedNormal" _ _ _ = [Nothing]
    +
    +opGrad "RefIdentity" _ _ [dz] = [Just dz]
    +opGrad "Cast" nodeDef _ [dz] = [Just reverseCast]
    +  where
    +    -- TODO(gnezdo): too permissive, python only allows float types as src_type.
    +    reverseCast =
    +        buildOp (opDef "Cast"
    +                 & opAttr "DstT" .~ (lookupAttr nodeDef "SrcT" :: ByteString)
    +                 & opAttr "SrcT" .~ (lookupAttr nodeDef "DstT" :: ByteString))
    +        dz
    +
    +opGrad "DynamicStitch" nodeDef inputs [dz] =
    +    replicate halfLen Nothing ++ valuesGrads
    +  where
    +    halfLen =
    +        let len = length inputs
    +            half = len `div` 2
    +        in if 2 * half == len
    +           then half
    +           else error ("Uneven input size " ++ show (len, showMessage nodeDef))
    +    valuesGrads = [ Just $ CoreOps.gather dz (toT idx :: Tensor Value Int32)
    +                  | idx <- take halfLen inputs
    +                  ]
    +
    +opGrad "DynamicPartition" nodeDef [toT -> xs, toT -> indices] dz =
    +    [ Just reconstructed, Nothing ]
    +  where
    +    reconstructed = CoreOps.reshape stitched
    +                    (CoreOps.shape (xs :: Tensor Value a) :: Tensor Value Int32)
    +    stitched = CoreOps.dynamicStitch partitionedIndices dz
    +    partitionedIndices = CoreOps.dynamicPartition np originalIndices indices
    +    np = lookupAttr nodeDef "num_partitions" :: Int64
    +    originalIndices =
    +        CoreOps.reshape (CoreOps.range 0 (CoreOps.size indices) 1) prefixShape
    +    prefixShape = shapeInt32 indices
    +    shapeInt32 = CoreOps.shape :: Tensor Value Int32 -> Tensor Value Int32
    +
    +opGrad "Select" _ [toT -> c, toT -> x, _] [dz] =
    +    [ Nothing
    +    , Just $ CoreOps.select c dz zeros
    +    , Just $ CoreOps.select c zeros dz
    +    ]
    +  where zeros = CoreOps.zerosLike x
    +
    +-- TODO(gnezdo): Unlike Python, no control dependency on dz.
    +opGrad "Log" _ [toT -> x] [dz] = [ Just $ dz * CoreOps.inv x ]
    +-- TODO(gnezdo): Reuse the output instead of doing another exp,
    +-- though, it is probably CSE'd away anyway.
    +opGrad "Exp" _ [toT -> x] [dz] = [ Just $ dz * CoreOps.exp x ]
    +opGrad "SparseSegmentSum" _ [toT -> x, toT -> y, toT -> t] [dz] =
    +    [ Just $ CoreOps.unsortedSegmentSum
    +             (CoreOps.gather dz (t :: Tensor Value Int32))
    +             (y :: Tensor Value Int32) inputRows
    +    , Nothing
    +    , Nothing
    +    ]
    +  where inputRows = CoreOps.slice (shape (x :: Tensor Value a)) (scalar (0 :: Int32)) (scalar 1)
    +
    +opGrad "LabelClasses" _ _ _ = [Nothing, Nothing]
    +opGrad "LabelWeights" _ _ _ = [Nothing]
    +opGrad "Size" _ _ _ = [Nothing]
    +opGrad "ZerosLike" _ _ _ = [Nothing]
    +
    +-- TODO(fmayle): These can go away if we properly prune the graph.
    +opGrad "Const" _ _ _ = [Nothing, Nothing]
    +opGrad "Placeholder" _ _ _ = []
    +opGrad "Variable" _ _ _ = []
    +
    +opGrad n nodeDef ins grads =
    +    error $ "no gradient implemented for " ++
    +            show (n, length ins, length grads, showMessage nodeDef, ins)
    +
    +-- | The number of outputs for an op type.
    +numOutputs :: NodeDef -> OutputIx
    +numOutputs o =
    +    case o ^. op of
    +        "Abs" -> 1
    +        "Add" -> 1
    +        "Cast" -> 1
    +        "Const" -> 1
    +        "Conv2D" -> 1
    +        "Div" -> 1
    +        "DynamicStitch" -> 1
    +        "DynamicPartition" ->
    +            fromIntegral (lookupAttr o "num_partitions" :: Int64)
    +        "Exp" -> 1
    +        "Gather" -> 1
    +        "LabelClasses" -> 1
    +        "LabelWeights" -> 1
    +        "Log" -> 1
    +        "MatMul" -> 1
    +        "Max" -> 1
    +        "MaxPool" -> 1
    +        "Mean" -> 1
    +        "Min" -> 1
    +        "Mul" -> 1
    +        "Neg" -> 1
    +        "Placeholder" -> 1
    +        "OneHot" -> 1
    +        "RefIdentity" -> 1
    +        "Relu" -> 1
    +        "Reshape" -> 1
    +        "Select" -> 1
    +        "Size" -> 1
    +        "SoftmaxCrossEntropyWithLogits" -> 2
    +        "Square" -> 1
    +        "SparseSegmentSum" -> 1
    +        "Sub" -> 1
    +        "Sum" -> 1
    +        "Transpose" -> 1
    +        "TruncatedNormal" -> 1
    +        "Variable" -> 1
    +        "ZerosLike" -> 1
+        _ -> error $ "numOutputs not implemented for " ++ show (o ^. op)
    +
    +-- Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`
    +safeShapeDiv x y = x `CoreOps.div` (CoreOps.maximum y 1)
    +
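+-- Shape tensor [-1]: the reshape wildcard that flattens a tensor to 1-D.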
    +allDimensions = vector [-1 :: Int32]
    +
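+-- A tensor containing [0 .. rank x - 1], i.e. every dimension index of x.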
    +rangeOfRank x = CoreOps.range 0 (CoreOps.rank x) 1
    +
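+-- Look up an attribute on a NodeDef, falling back to the attr's default value.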
    +lookupAttr nodeDef attrName = nodeDef ^. attr . at attrName . non def . attrLens
    +
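[Editor's note, not part of the generated page: a hedged sketch of gradients. For y = x * x at x = 3 the result should come back as the constant 6; `gradExample` is an illustrative name.]

```haskell
gradExample :: Build [Tensor Value Float]
gradExample = do
    let x = scalar (3 :: Float)
        y = x * x          -- Num instance from TensorFlow.Ops
    gradients y [x]        -- expect [6]
```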
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Ops.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Ops.html
new file mode 100644
index 0000000..b26d3aa
--- /dev/null
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Ops.html
@@ -0,0 +1,307 @@
+-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +-- | This module contains definitions for some built-in TensorFlow operations.
    +--
+-- Note that certain "stateful" ops like 'variable' and 'assign' return a
+-- 'Build' action (e.g., @Build (Tensor Ref a)@) instead of a pure value; the
+-- returned 'Tensor's are always rendered in the current 'Build' context.  This
    +-- approach helps us avoid problems with inlining or common subexpression
    +-- elimination, by writing
    +--
    +-- > do
    +-- >     v <- variable []
    +-- >     w <- assign v 3
    +-- >     render $ w * w
    +--
    +-- instead of
    +--
    +-- > let
    +-- >    v = variable []
    +-- >    w = assign v 3
    +-- > in w * w
    +--
    +-- since the latter could be reasonably transformed by the compiler into (or
    +-- vice versa)
    +--
    +-- > let
    +-- >    v = variable []
    +-- >    w = assign v 3
    +-- >    w' = assign v 3
    +-- > in w * w'
    +--
    +-- Ops should return a 'Build' action if their original 'OpDef' marks them as
    +-- stateful, or if they take any Refs as input.  (This mirrors the rules that
    +-- TensorFlow uses to avoid common subexpression elimination.)
    +{-# LANGUAGE ConstraintKinds #-}
    +{-# LANGUAGE DataKinds #-}
    +{-# LANGUAGE FlexibleInstances #-}
    +{-# LANGUAGE OverloadedLists #-}
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE RankNTypes #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +{-# LANGUAGE TypeFamilies #-}
    +{-# LANGUAGE UndecidableInstances #-}
    +{-# OPTIONS_GHC -fno-warn-orphans #-}
    +
    +module TensorFlow.Ops
    +    ( CoreOps.add
    +    , CoreOps.abs
    +    , CoreOps.addN
    +    , CoreOps.argMax
    +    , assign
    +    , CoreOps.broadcastGradientArgs
    +    , CoreOps.cast
    +    , CoreOps.concat
    +    , constant
    +    , expandDims
    +    , initializedVariable
    +    , zeroInitializedVariable
    +    , CoreOps.fill
    +    , CoreOps.matMul
    +    , matTranspose
    +    , CoreOps.mul
    +    , CoreOps.neg
    +    , CoreOps.pack
    +    , placeholder
    +    , CoreOps.range
    +    , reducedShape
    +    , CoreOps.relu
    +    , CoreOps.reluGrad
    +    , CoreOps.reshape
    +    , restore
    +    , restoreFromName
    +    , save
    +    , scalar
    +    , shape
    +    , CoreOps.sign
    +    , CoreOps.size
    +    , CoreOps.softmax
    +    , CoreOps.softmaxCrossEntropyWithLogits
    +    , CoreOps.sparseToDense
    +    , CoreOps.sub
    +    , CoreOps.sum
    +    , CoreOps.topK
    +    , CoreOps.transpose
    +    , truncatedNormal
    +    , variable
    +    , vector
    +    , zeros
    +    , CoreOps.zerosLike
    +    ) where
    +
    +import Data.ByteString (ByteString)
    +import Data.Complex (Complex)
    +import Data.Int (Int32, Int64)
    +import Prelude hiding (abs, sum, concat)
    +import Data.ProtoLens (def)
    +import Data.Text.Encoding (encodeUtf8)
    +import Lens.Family2 ((.~), (&))
    +import Text.Printf (printf)
    +import Proto.Tensorflow.Core.Framework.Tensor
    +    ( TensorProto
    +    , dtype
    +    , tensorShape
    +    )
    +import qualified Proto.Tensorflow.Core.Framework.TensorShape
    +  as TensorShape
    +import TensorFlow.Build
    +import TensorFlow.BuildOp
    +import TensorFlow.ControlFlow (group)
    +import TensorFlow.Output (unNodeName)
    +import TensorFlow.Tensor
    +import TensorFlow.Types
    +
    +import qualified TensorFlow.GenOps.Core as CoreOps
    +
    +import qualified Prelude (abs)
    +
    +-- TODO: Look into hs-boot refactoring to allow mutually recursive imports.
    +-- | Must be defined as an orphan because of the dependency order between Ops
    +-- and Tensor.
    +--
    +-- The indirect constraint "v ~ Value" helps disambiguate types, for example in
    +-- "neg 1 :: Tensor Value Float", it helps find the type of the subexpression
    +-- "1".
    +instance ( TensorType a
    +         , Num a
    +         , v ~ Value
    +         , OneOf '[ Double, Float, Int32, Int64
    +                  , Complex Float, Complex Double] a) => Num (Tensor v a) where
    +    (+) = CoreOps.add
    +    (*) = CoreOps.mul
    +    (-) = CoreOps.sub
    +    abs = CoreOps.abs
    +    fromInteger = scalar . fromInteger
    +    signum = CoreOps.sign
    +    negate = CoreOps.neg
    +
    +matTranspose :: forall a v . TensorType a
    +             => Tensor v a -> Tensor Value a
    +matTranspose = flip CoreOps.transpose (vector [1, 0 :: Int32])
    +
    +-- | Create a new, uninitialized stateful Tensor of the given shape.
    +variable :: forall a . TensorType a => Shape -> Build (Tensor Ref a)
    +variable shape' = buildOp $ opDef "Variable"
    +                          & opAttr "shape" .~ shape'
    +                          & opAttr "dtype" .~ tensorType (undefined :: a)
    +
    +placeholder :: forall a . TensorType a => Shape -> Build (Tensor Value a)
    +placeholder shape' =
    +    buildOp $ opDef "Placeholder"
    +            & opAttr "dtype" .~ tensorType (undefined :: a)
    +            & opAttr "shape" .~ shape'
    +
    +-- Assign returns the input ref.
    +assign :: forall a v . TensorType a
    +       => Tensor Ref a -> Tensor v a -> Build (Tensor Ref a)
    +assign = buildOp $ opDef "Assign"
    +                      & opAttr "T" .~ tensorType (undefined :: a)
    +                      & opAttr "use_locking" .~ True
    +
    +-- | Creates a variable initialized to the given value.
    +-- Initialization happens next time session runs.
    +initializedVariable :: forall a . TensorType a
    +                    => Tensor Value a -> Build (Tensor Ref a)
    +initializedVariable initializer = do
    +    v <- variable []  -- The shape is not known initially.
    +    (i :: Tensor Ref a) <-
    +        buildOp (opDef "Assign"
    +                 & opAttr "T" .~ tensorType (undefined :: a)
    +                 & opAttr "use_locking" .~ True
    +                 & opAttr "validate_shape" .~ False
    +                 )
    +        v initializer
    +    addInitializer =<< group i
    +    return v
    +
    +-- | Creates a zero-initialized variable with the given shape.
    +zeroInitializedVariable
    +  :: (TensorType a, Num a) =>
    +     TensorFlow.Types.Shape -> Build (Tensor TensorFlow.Tensor.Ref a)
    +zeroInitializedVariable = initializedVariable . zeros
    +
    +-- TODO: Support heterogeneous list of tensors.
    +save :: forall a v . TensorType a
    +        => ByteString     -- ^ File path.
    +        -> [Tensor v a]  -- ^ Tensors to save.
    +        -> Build ControlNode
    +save path xs = do
    +    let toByteStringTensor = scalar . encodeUtf8 . unNodeName
    +    names <- mapM (fmap toByteStringTensor . renderNodeName) xs
    +    let types = replicate (length xs) (tensorType (undefined :: a))
    +    let saveOp = buildOp $ opDef "Save"
    +                         & opAttr "T" .~ types
    +    saveOp (scalar path) (CoreOps.pack names) xs
    +
    +-- | Restore a tensor's value from a checkpoint file.
    +--
    +-- This version allows restoring from a checkpoint file that uses a different
    +-- tensor name than the variable.
    +restoreFromName :: forall a . TensorType a
    +                => ByteString    -- ^ File path.
    +                -> ByteString    -- ^ Tensor name override.
    +                -> Tensor Ref a  -- ^ Tensor to restore.
    +                -> Build ControlNode
    +restoreFromName path name x = do
    +    let restoreOp = buildOp $ opDef "Restore"
    +                            & opAttr "dt" .~ tensorType (undefined :: a)
    +    group =<< assign x (restoreOp (scalar path) (scalar name) :: Tensor Value a)
    +
    +-- | Restore a tensor's value from a checkpoint file.
    +restore :: forall a . TensorType a
    +        => ByteString    -- ^ File path.
    +        -> Tensor Ref a  -- ^ Tensor to restore.
    +        -> Build ControlNode
    +restore path x = do
    +    name <- encodeUtf8 . unNodeName <$> renderNodeName x
    +    restoreFromName path name x
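    +
    +-- A round-trip sketch combining 'save' and 'restore' (path and variable are
    +-- hypothetical; OverloadedStrings provides the ByteString literals, and both
    +-- resulting ControlNodes still need to be run in a session):
    +--
    +-- > saveOp <- save "/tmp/checkpoint" [w]
    +-- > restoreOp <- restore "/tmp/checkpoint" w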
    +
    +-- | Create a constant tensor.
    +--
    +-- The values should be in row major order, e.g.,
    +--
    +--   element 0:   index (0, ..., 0)
    +--   element 1:   index (0, ..., 1)
    +--   ...
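    +--
    +-- For example, a 2x3 matrix in row-major order (a sketch):
    +--
    +-- > constant (Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]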
    +constant :: forall a . TensorType a => Shape -> [a] -> Tensor Value a
    +constant (Shape shape') values
    +    | invalidLength = error invalidLengthMsg
    +    | otherwise = buildOp $ opDef "Const"
    +                          & opAttr "value" .~ typedNode
    +                          & opAttr "dtype" .~ nodeType
    +  where
    +    invalidLength = product shape' /= fromIntegral (length values)
    +    invalidLengthMsg = printf "invalid tensor length: expected %d got %d"
    +                              (product shape')
    +                              (length values)
    +    nodeType = tensorType (undefined :: a)
    +    typedNode :: TensorProto
    +    typedNode = def
    +                & dtype .~ nodeType
    +                & tensorShape.TensorShape.dim .~
    +                      [def & TensorShape.size .~ x | x <- shape']
    +                & tensorVal .~ values
    +
    +-- | Create a constant vector.
    +vector :: TensorType a => [a] -> Tensor Value a
    +vector xs = constant [fromIntegral $ length xs] xs
    +
    +-- | Create a constant scalar.
    +scalar :: forall a . TensorType a => a -> Tensor Value a
    +scalar x = constant [] [x]
    +
    +-- Random tensor drawn from the unit normal distribution, with values
    +-- bounded (samples are truncated to lie within two standard deviations).
    +truncatedNormal :: forall a v . TensorType a
    +                => Tensor v Int64  -- ^ Shape.
    +                -> Build (Tensor Value a)
    +truncatedNormal = buildOp $ opDef "TruncatedNormal"
    +                          & opAttr "dtype" .~ tensorType (undefined :: a)
    +                          & opAttr "T" .~ tensorType (undefined :: Int64)
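    +
    +-- For example, a sketch of drawing a 5x5 float tensor (the shape is passed
    +-- as an Int64 vector):
    +--
    +-- > (t :: Tensor Value Float) <- truncatedNormal (vector [5, 5 :: Int64])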
    +
    +zeros :: forall a . (Num a, TensorType a) => Shape -> Tensor Value a
    +zeros (Shape shape') = CoreOps.fill (vector $ map fromIntegral shape') (scalar 0)
    +
    +shape :: (TensorType t) => Tensor v1 t -> Tensor Value Int32
    +shape = CoreOps.shape
    +
    +expandDims :: (TensorType t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t
    +expandDims = CoreOps.expandDims
    +
    +-- | Helper function for reduction ops (translation of math_ops.reduced_shape).
    +reducedShape :: (OneOf '[ Int32, Int64 ] t1, OneOf '[ Int32, Int64 ] t2) =>
    +                Tensor v1 t1 -> Tensor v2 t2 -> Tensor Value Int32
    +reducedShape inputShape axes =
    +    let inputShape32 = toInt32 inputShape         -- [2, 3, 5, 7]
    +        axes32 = toInt32 axes                     -- [1, 2]
    +        toInt32 x = CoreOps.cast x :: Tensor Value Int32
    +        inputRank = CoreOps.size inputShape32     -- 4
    +        axesMod = (axes32 + inputRank) `CoreOps.mod` inputRank
    +        axesShape = shape axesMod                 -- [2]
    +    in CoreOps.dynamicStitch                      -- [2, 1, 1, 7]
    +         [CoreOps.range 0 inputRank 1,            -- [0, 1, 2, 3]
    +           axesMod]                               -- [1, 2]
    +         [inputShape32,                           -- [2, 3, 5, 7]
    +           CoreOps.fill axesShape 1]              -- [1, 1]
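    +
    +-- Tying the traced shapes together, a concrete call would be (a sketch):
    +--
    +-- > reducedShape (vector [2, 3, 5, 7 :: Int32]) (vector [1, 2 :: Int32])
    +--
    +-- which builds a tensor evaluating to [2, 1, 1, 7]: the input shape with
    +-- each reduced axis replaced by 1.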
    +
    +
    diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-ops-0.1.0.0/src/hscolour.css
    new file mode 100644
    index 0000000..c15919e
    --- /dev/null
    +++ b/docs/haddock/tensorflow-ops-0.1.0.0/src/hscolour.css
    @@ -0,0 +1,5 @@
    +.hs-keyglyph, .hs-layout {color: red;}
    +.hs-keyword {color: blue;}
    +.hs-comment, .hs-comment a {color: green;}
    +.hs-str, .hs-chr {color: teal;}
    +.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {}
    diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-ops-0.1.0.0/synopsis.png
    new file mode 100644
    index 0000000000000000000000000000000000000000..85fb86ec84907bcc86531dc82871948ff4d471fa
    GIT binary patch
    literal 11327
    [11327 bytes of binary image data omitted]
    literal 0
    HcmV?d00001
    diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt b/docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt
    new file mode 100644
    index 0000000..c3c9ca3
    --- /dev/null
    +++ b/docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt
    @@ -0,0 +1,378 @@
    +-- Hoogle documentation, generated by Haddock
    +-- See Hoogle, http://www.haskell.org/hoogle/
    +
    +
    +-- | Friendly layer around TensorFlow bindings.
    +--
    +-- Please see README.md
    +@package tensorflow-ops
    +@version 0.1.0.0
    +
    +
    +-- | This module contains definitions for some built-in TensorFlow
    +-- operations.
    +--
    +-- Note that certain "stateful" ops like variable and
    +-- assign return a Build action (e.g., Build (Tensor
    +-- Ref a)) instead of a pure value; the returned Tensors are
    +-- always rendered in the current Build context. This approach
    +-- helps us avoid problems with inlining or common subexpression
    +-- elimination, by writing
    +--
    +--
    +--   do
    +--       v <- variable []
    +--       w <- assign v 3
    +--       render $ w * w
    +--   
    +-- +-- instead of +-- +--
    +--   let
    +--      v = variable []
    +--      w = assign v 3
    +--   in w * w
    +--   
    +-- +-- since the latter could be reasonably transformed by the compiler into +-- (or vice versa) +-- +--
    +--   let
    +--      v = variable []
    +--      w = assign v 3
    +--      w' = assign v 3
    +--   in w * w'
    +--   
    +-- +-- Ops should return a Build action if their original +-- OpDef marks them as stateful, or if they take any Refs as +-- input. (This mirrors the rules that TensorFlow uses to avoid common +-- subexpression elimination.) +module TensorFlow.Ops + +-- | Returns x + y element-wise. +-- +--
    +-- * NOTE: Add supports broadcasting. AddN does not.
    +--   More about broadcasting here.
    +add :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes the absolute value of a tensor. +-- +-- Given a tensor x, this operation returns a tensor containing +-- the absolute value of each element in x. For example, if x is +-- an input element and y is an output element, this operation computes +-- \(y = |x|\). +abs :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t) => Tensor v1 t -> Tensor Value t + +-- | Add all input tensors element wise. +addN :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t) => [Tensor v1 t] -> Tensor Value t + +-- | Returns the index with the largest value across dimensions of a +-- tensor. +argMax :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value Int64 +assign :: TensorType a => Tensor Ref a -> Tensor v a -> Build (Tensor Ref a) + +-- | Return the reduction indices for computing gradients of s0 op s1 with +-- broadcast. +-- +-- This is typically used by gradient computations for a broadcasting +-- operation. +broadcastGradientArgs :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) t) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value t) + +-- | Cast x of type SrcT to y of DstT. +cast :: (TensorType dstT, TensorType srcT) => Tensor v1 srcT -> Tensor Value dstT + +-- | Concatenates tensors along one dimension. +concat :: TensorType t => Tensor v1 Int32 -> [Tensor v2 t] -> Tensor Value t + +-- | Create a constant tensor. +-- +-- The values should be in row major order, e.g., +-- +-- element 0: index (0, ..., 0) element 1: index (0, ..., 1) ... +constant :: TensorType a => Shape -> [a] -> Tensor Value a +expandDims :: (TensorType t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t + +-- | Creates a variable initialized to the given value. Initialization +-- happens next time session runs. +initializedVariable :: TensorType a => Tensor Value a -> Build (Tensor Ref a) + +-- | Creates a zero-initialized variable with the given shape. +zeroInitializedVariable :: (TensorType a, Num a) => Shape -> Build (Tensor Ref a) + +-- | Creates a tensor filled with a scalar value. +-- +-- This operation creates a tensor of shape dims and fills it +-- with value. +-- +-- For example: +-- +-- ```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9) +-- ==> [[9, 9, 9] [9, 9, 9]] ``` +fill :: TensorType t => Tensor v1 Int32 -> Tensor v2 t -> Tensor Value t + +-- | Multiply the matrix "a" by the matrix "b". +-- +-- The inputs must be two-dimensional matrices and the inner dimension of +-- "a" (after being transposed if transpose_a is true) must match the +-- outer dimension of "b" (after being transposed if transposed_b is +-- true). +-- +--
    +-- * Note: The default kernel implementation for MatMul on GPUs uses
    +--   cublas.
    +matMul :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t +matTranspose :: TensorType a => Tensor v a -> Tensor Value a + +-- | Returns x * y element-wise. +-- +--
    +-- * NOTE: Mul supports broadcasting. More about broadcasting
    +--   here.
    +mul :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes numerical negative value element-wise. +-- +-- I.e., \(y = -x\). +neg :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t) => Tensor v1 t -> Tensor Value t + +-- | Packs a list of N rank-R tensors into one +-- rank-`(R+1)` tensor. +-- +-- Packs the N tensors in values into a tensor with +-- rank one higher than each tensor in values, by packing them +-- along the axis dimension. Given a list of tensors of shape +-- `(A, B, C)`; +-- +-- if `axis == 0` then the output tensor will have the shape +-- `(N, A, B, C)`. if `axis == 1` then the output tensor will +-- have the shape `(A, N, B, C)`. Etc. +-- +-- For example: +-- +-- ```prettyprint # x is [1, 4] # y is [2, 5] # +-- z is [3, 6] pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # +-- Pack along first dim. pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, +-- 6]] ``` +-- +-- This is the opposite of unpack. +pack :: TensorType t => [Tensor v1 t] -> Tensor Value t +placeholder :: TensorType a => Shape -> Build (Tensor Value a) + +-- | Creates a sequence of integers. +-- +-- This operation creates a sequence of integers that begins at +-- start and extends by increments of delta up to but +-- not including limit. +-- +-- For example: +-- +-- ``` # start is 3 # limit is 18 # delta is 3 +-- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] ``` +range :: (TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v1 tidx -> Tensor v2 tidx -> Tensor v3 tidx -> Tensor Value tidx + +-- | Helper function for reduction ops (translation of +-- math_ops.reduced_shape). +reducedShape :: (OneOf '[Int32, Int64] t1, OneOf '[Int32, Int64] t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Value Int32 + +-- | Computes rectified linear: `max(features, 0)`. +relu :: (TensorType t, OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t) => Tensor v1 t -> Tensor Value t + +-- | Computes rectified linear gradients for a Relu operation. +reluGrad :: (TensorType t, OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Reshapes a tensor. +-- +-- Given tensor, this operation returns a tensor that has the +-- same values as tensor with shape shape. +-- +-- If one component of shape is the special value -1, the size of +-- that dimension is computed so that the total size remains constant. In +-- particular, a shape of `[-1]` flattens into 1-D. At most one +-- component of shape can be -1. +-- +-- If shape is 1-D or higher, then the operation returns a tensor +-- with shape shape filled with the values of tensor. In +-- this case, the number of elements implied by shape must be the +-- same as the number of elements in tensor. 
    +-- +-- For example: +-- +-- ```prettyprint # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9] # +-- tensor t has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], +-- [4, 5, 6], [7, 8, 9]] +-- +-- # tensor t is [[[1, 1], [2, 2]], # [[3, 3], [4, 4]]] # tensor +-- t has shape [2, 2, 2] reshape(t, [2, 4]) ==> [[1, 1, 2, +-- 2], [3, 3, 4, 4]] +-- +-- # tensor t is [[[1, 1, 1], # [2, 2, 2]], # [[3, 3, 3], # [4, +-- 4, 4]], # [[5, 5, 5], # [6, 6, 6]]] # tensor t has shape [3, +-- 2, 3] # pass '[-1]' to flatten t reshape(t, [-1]) ==> [1, +-- 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] +-- +-- # -1 can also be used to infer the shape +-- +-- # -1 is inferred to be 9: reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, +-- 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] # -1 is inferred to be 2: +-- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5, +-- 5, 5, 6, 6, 6]] # -1 is inferred to be 3: reshape(t, [2, -1, 3]) +-- ==> [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, +-- 6, 6]]] +-- +-- # tensor t is [7] # shape `[]` reshapes to a scalar +-- reshape(t, []) ==> 7 ``` +reshape :: (TensorType t, TensorType tshape, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) => Tensor v1 t -> Tensor v2 tshape -> Tensor Value t + +-- | Restore a tensor's value from a checkpoint file. +restore :: TensorType a => ByteString -> Tensor Ref a -> Build ControlNode + +-- | Restore a tensor's value from a checkpoint file. +-- +-- This version allows restoring from a checkpoint file that uses a +-- different tensor name than the variable. +restoreFromName :: TensorType a => ByteString -> ByteString -> Tensor Ref a -> Build ControlNode +save :: TensorType a => ByteString -> [Tensor v a] -> Build ControlNode + +-- | Create a constant scalar. +scalar :: TensorType a => a -> Tensor Value a +shape :: (TensorType t) => Tensor v1 t -> Tensor Value Int32 + +-- | Returns an element-wise indication of the sign of a number. +-- +-- `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. +-- +-- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y +-- = 0`. +sign :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t) => Tensor v1 t -> Tensor Value t + +-- | Returns the size of a tensor. +-- +-- This operation returns an integer representing the number of elements +-- in input. +-- +-- For example: +-- +-- ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], +-- [4, 4, 4]]] size(t) ==> 12 ``` +size :: (TensorType t, TensorType out_type, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) => Tensor v1 t -> Tensor Value out_type + +-- | Computes softmax activations. +-- +-- For each batch i and class j we have +-- +-- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j])) +softmax :: (TensorType t, OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t) => Tensor v1 t -> Tensor Value t + +-- | Computes softmax cross entropy cost and gradients to backpropagate. +-- +-- Inputs are the logits, not probabilities. +softmaxCrossEntropyWithLogits :: (TensorType t, OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value t) + +-- | Converts a sparse representation into a dense tensor. +-- +-- Builds an array dense with shape output_shape such +-- that +-- +-- ```prettyprint # If sparse_indices is scalar dense[i] = (i == +-- sparse_indices ? 
sparse_values : default_value) +-- +-- # If sparse_indices is a vector, then for each i +-- dense[sparse_indices[i]] = sparse_values[i] +-- +-- # If sparse_indices is an n by d matrix, then for each i in [0, n) +-- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = +-- sparse_values[i] ``` +-- +-- All other values in dense are set to default_value. +-- If sparse_values is a scalar, all sparse indices are set to +-- this single value. +-- +-- Indices should be sorted in lexicographic order, and indices must not +-- contain any repeats. If validate_indices is true, these +-- properties are checked during execution. +sparseToDense :: (TensorType t, TensorType tindices, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) => Tensor v1 tindices -> Tensor v2 tindices -> Tensor v3 t -> Tensor v4 t -> Tensor Value t + +-- | Returns x - y element-wise. +-- +--
    +-- * NOTE: Sub supports broadcasting. More about broadcasting
    +--   here.
    +sub :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t + +-- | Computes the sum of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +sum :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t + +-- | Finds values and indices of the k largest elements for the +-- last dimension. +-- +-- If the input is a vector (rank-1), finds the k largest +-- entries in the vector and outputs their values and indices as vectors. +-- Thus `values[j]` is the j-th largest entry in input, +-- and its index is `indices[j]`. +-- +-- For matrices (resp. higher rank input), computes the top k +-- entries in each row (resp. vector along the last dimension). Thus, +-- +-- values.shape = indices.shape = input.shape[:-1] + [k] +-- +-- If two elements are equal, the lower-index element appears first. +-- +-- If k varies dynamically, use TopKV2 below. +topK :: (TensorType t, OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t) => Int64 -> Tensor v1 t -> (Tensor Value t, Tensor Value Int32) + +-- | Shuffle dimensions of x according to a permutation. +-- +-- The output y has the same rank as x. The shapes of +-- x and y satisfy: `y.shape[i] == x.shape[perm[i]] for +-- i in [0, 1, ..., rank(x) - 1]` +transpose :: (TensorType t, TensorType tperm, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) => Tensor v1 t -> Tensor v2 tperm -> Tensor Value t +truncatedNormal :: TensorType a => Tensor v Int64 -> Build (Tensor Value a) + +-- | Create a new, uninitialized stateful Tensor of the given shape. +variable :: TensorType a => Shape -> Build (Tensor Ref a) + +-- | Create a constant vector. +vector :: TensorType a => [a] -> Tensor Value a +zeros :: (Num a, TensorType a) => Shape -> Tensor Value a + +-- | Returns a tensor of zeros with the same shape and type as x. +zerosLike :: TensorType t => Tensor v1 t -> Tensor Value t +instance (TensorFlow.Types.TensorType a, GHC.Num.Num a, v ~ TensorFlow.Tensor.Value, TensorFlow.Types.OneOf '[GHC.Types.Double, GHC.Types.Float, GHC.Int.Int32, GHC.Int.Int64, Data.Complex.Complex GHC.Types.Float, Data.Complex.Complex GHC.Types.Double] a) => GHC.Num.Num (TensorFlow.Tensor.Tensor v a) + + +-- | Parallel lookups on the list of tensors. +module TensorFlow.EmbeddingOps + +-- | Looks up ids in a list of embedding tensors. +-- +-- This function is used to perform parallel lookups on the list of +-- tensors in params. It is a generalization of gather, +-- where params is interpreted as a partition of a larger +-- embedding tensor. +-- +-- The partition_strategy is "mod", we assign each id to partition `p = +-- id % len(params)`. For instance, 13 ids are split across 5 partitions +-- as: `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]` +-- +-- The results of the lookup are concatenated into a dense tensor. 
    The
    +-- returned tensor has shape `shape(ids) + shape(params)[1:]`.
    +embeddingLookup :: (TensorType a, OneOf '[Int64, Int32] b, Num b) => [Tensor v a] -> Tensor Value b -> Build (Tensor Value a)
    +
    +module TensorFlow.Gradient
    +
    +-- | Gradient of y w.r.t. each element of xs.
    +gradients :: (Num (Tensor v1 a), v1 ~ Value, GradientCompatible a) => Tensor v1 a -> [Tensor v2 a] -> Build [Tensor Value a]
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html
    new file mode 100644
    index 0000000..57711ac
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html
    @@ -0,0 +1,4 @@
    +Proto.Tensorflow.Core.Framework.AttrValue

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.AttrValue

    Documentation

    data AttrValue Source

    Instances

    Eq AttrValue Source 
    Show AttrValue Source 
    Default AttrValue Source 
    Message AttrValue Source 
    HasField "b" AttrValue AttrValue Source 
    HasField "f" AttrValue AttrValue Source 
    HasField "func" AttrValue AttrValue Source 
    HasField "i" AttrValue AttrValue Source 
    HasField "list" AttrValue AttrValue Source 
    HasField "maybe'b" AttrValue AttrValue Source 
    HasField "maybe'f" AttrValue AttrValue Source 
    HasField "maybe'func" AttrValue AttrValue Source 
    HasField "maybe'i" AttrValue AttrValue Source 
    HasField "maybe'list" AttrValue AttrValue Source 
    HasField "maybe'placeholder" AttrValue AttrValue Source 
    HasField "maybe's" AttrValue AttrValue Source 
    HasField "maybe'shape" AttrValue AttrValue Source 
    HasField "maybe'tensor" AttrValue AttrValue Source 
    HasField "maybe'type'" AttrValue AttrValue Source 
    HasField "placeholder" AttrValue AttrValue Source 
    HasField "s" AttrValue AttrValue Source 
    HasField "shape" AttrValue AttrValue Source 
    HasField "tensor" AttrValue AttrValue Source 
    HasField "type'" AttrValue AttrValue Source 
    type Field "b" AttrValue = Bool Source 
    type Field "f" AttrValue = Float Source 
    type Field "func" AttrValue = NameAttrList Source 
    type Field "i" AttrValue = Int64 Source 
    type Field "list" AttrValue = AttrValue'ListValue Source 
    type Field "maybe'b" AttrValue = Maybe Bool Source 
    type Field "maybe'f" AttrValue = Maybe Float Source 
    type Field "maybe'func" AttrValue = Maybe NameAttrList Source 
    type Field "maybe'i" AttrValue = Maybe Int64 Source 
    type Field "maybe'list" AttrValue = Maybe AttrValue'ListValue Source 
    type Field "maybe'placeholder" AttrValue = Maybe Text Source 
    type Field "maybe's" AttrValue = Maybe ByteString Source 
    type Field "maybe'shape" AttrValue = Maybe TensorShapeProto Source 
    type Field "maybe'tensor" AttrValue = Maybe TensorProto Source 
    type Field "maybe'type'" AttrValue = Maybe DataType Source 
    type Field "placeholder" AttrValue = Text Source 
    type Field "s" AttrValue = ByteString Source 
    type Field "shape" AttrValue = TensorShapeProto Source 
    type Field "tensor" AttrValue = TensorProto Source 
    type Field "type'" AttrValue = DataType Source 

    attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') Source

    b :: forall msg msg'. HasField "b" msg msg' => Lens msg msg' (Field "b" msg) (Field "b" msg') Source

    f :: forall msg msg'. HasField "f" msg msg' => Lens msg msg' (Field "f" msg) (Field "f" msg') Source

    func :: forall msg msg'. HasField "func" msg msg' => Lens msg msg' (Field "func" msg) (Field "func" msg') Source

    i :: forall msg msg'. HasField "i" msg msg' => Lens msg msg' (Field "i" msg) (Field "i" msg') Source

    key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') Source

    list :: forall msg msg'. HasField "list" msg msg' => Lens msg msg' (Field "list" msg) (Field "list" msg') Source

    maybe'b :: forall msg msg'. HasField "maybe'b" msg msg' => Lens msg msg' (Field "maybe'b" msg) (Field "maybe'b" msg') Source

    maybe'f :: forall msg msg'. HasField "maybe'f" msg msg' => Lens msg msg' (Field "maybe'f" msg) (Field "maybe'f" msg') Source

    maybe'func :: forall msg msg'. HasField "maybe'func" msg msg' => Lens msg msg' (Field "maybe'func" msg) (Field "maybe'func" msg') Source

    maybe'i :: forall msg msg'. HasField "maybe'i" msg msg' => Lens msg msg' (Field "maybe'i" msg) (Field "maybe'i" msg') Source

    maybe'list :: forall msg msg'. HasField "maybe'list" msg msg' => Lens msg msg' (Field "maybe'list" msg) (Field "maybe'list" msg') Source

    maybe'placeholder :: forall msg msg'. HasField "maybe'placeholder" msg msg' => Lens msg msg' (Field "maybe'placeholder" msg) (Field "maybe'placeholder" msg') Source

    maybe's :: forall msg msg'. HasField "maybe's" msg msg' => Lens msg msg' (Field "maybe's" msg) (Field "maybe's" msg') Source

    maybe'shape :: forall msg msg'. HasField "maybe'shape" msg msg' => Lens msg msg' (Field "maybe'shape" msg) (Field "maybe'shape" msg') Source

    maybe'tensor :: forall msg msg'. HasField "maybe'tensor" msg msg' => Lens msg msg' (Field "maybe'tensor" msg) (Field "maybe'tensor" msg') Source

    maybe'type' :: forall msg msg'. HasField "maybe'type'" msg msg' => Lens msg msg' (Field "maybe'type'" msg) (Field "maybe'type'" msg') Source

    maybe'value :: forall msg msg'. HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg') Source

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') Source

    placeholder :: forall msg msg'. HasField "placeholder" msg msg' => Lens msg msg' (Field "placeholder" msg) (Field "placeholder" msg') Source

    s :: forall msg msg'. HasField "s" msg msg' => Lens msg msg' (Field "s" msg) (Field "s" msg') Source

    shape :: forall msg msg'. HasField "shape" msg msg' => Lens msg msg' (Field "shape" msg) (Field "shape" msg') Source

    tensor :: forall msg msg'. HasField "tensor" msg msg' => Lens msg msg' (Field "tensor" msg) (Field "tensor" msg') Source

    type' :: forall msg msg'. HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg') Source

    value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') Source

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html
    new file mode 100644
    index 0000000..d9a1521
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html
    @@ -0,0 +1,4 @@
    +Proto.Tensorflow.Core.Framework.Graph

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.Graph

    Documentation

    data GraphDef Source

    Constructors

    GraphDef 

    Fields

    _GraphDef'node :: [NodeDef]
     
    _GraphDef'versions :: Maybe VersionDef
     
    _GraphDef'version :: Int32
     
    _GraphDef'library :: Maybe FunctionDefLibrary
     

    Instances

    Eq GraphDef Source 
    Show GraphDef Source 
    Default GraphDef Source 
    Message GraphDef Source 
    HasField "library" GraphDef GraphDef Source 
    HasField "maybe'library" GraphDef GraphDef Source 
    HasField "maybe'versions" GraphDef GraphDef Source 
    HasField "node" GraphDef GraphDef Source 
    HasField "version" GraphDef GraphDef Source 
    HasField "versions" GraphDef GraphDef Source 
    type Field "library" GraphDef Source 
    type Field "maybe'library" GraphDef Source 
    type Field "maybe'versions" GraphDef Source 
    type Field "node" GraphDef = [NodeDef] Source 
    type Field "version" GraphDef = Int32 Source 
    type Field "versions" GraphDef Source 

    library :: forall msg msg'. HasField "library" msg msg' => Lens msg msg' (Field "library" msg) (Field "library" msg') Source

    maybe'library :: forall msg msg'. HasField "maybe'library" msg msg' => Lens msg msg' (Field "maybe'library" msg) (Field "maybe'library" msg') Source

    maybe'versions :: forall msg msg'. HasField "maybe'versions" msg msg' => Lens msg msg' (Field "maybe'versions" msg) (Field "maybe'versions" msg') Source

    node :: forall msg msg'. HasField "node" msg msg' => Lens msg msg' (Field "node" msg) (Field "node" msg') Source

    version :: forall msg msg'. HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg') Source

    versions :: forall msg msg'. HasField "versions" msg msg' => Lens msg msg' (Field "versions" msg) (Field "versions" msg') Source

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html
    new file mode 100644
    index 0000000..fa368f3
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html
    @@ -0,0 +1,4 @@
    +Proto.Tensorflow.Core.Framework.NodeDef

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.NodeDef

    Documentation

    data NodeDef Source

    Instances

    Eq NodeDef Source 
    Show NodeDef Source 
    Default NodeDef Source 
    Message NodeDef Source 
    HasField "attr" NodeDef NodeDef Source 
    HasField "device" NodeDef NodeDef Source 
    HasField "input" NodeDef NodeDef Source 
    HasField "name" NodeDef NodeDef Source 
    HasField "op" NodeDef NodeDef Source 
    type Field "attr" NodeDef = Map Text AttrValue Source 
    type Field "device" NodeDef = Text Source 
    type Field "input" NodeDef = [Text] Source 
    type Field "name" NodeDef = Text Source 
    type Field "op" NodeDef = Text Source 

    attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') Source

    device :: forall msg msg'. HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg') Source

    input :: forall msg msg'. HasField "input" msg msg' => Lens msg msg' (Field "input" msg) (Field "input" msg') Source

    key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') Source

    maybe'value :: forall msg msg'. HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg') Source

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') Source

    op :: forall msg msg'. HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg') Source

    value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') Source

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html
    new file mode 100644
    index 0000000..c656d2f
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html
    @@ -0,0 +1,4 @@
    +Proto.Tensorflow.Core.Framework.OpDef

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.OpDef

    Documentation

    data OpDef Source

    Instances

    Eq OpDef Source 
    Show OpDef Source 
    Default OpDef Source 
    Message OpDef Source 
    HasField "allowsUninitializedInput" OpDef OpDef Source 
    HasField "attr" OpDef OpDef Source 
    HasField "deprecation" OpDef OpDef Source 
    HasField "description" OpDef OpDef Source 
    HasField "inputArg" OpDef OpDef Source 
    HasField "isAggregate" OpDef OpDef Source 
    HasField "isCommutative" OpDef OpDef Source 
    HasField "isStateful" OpDef OpDef Source 
    HasField "maybe'deprecation" OpDef OpDef Source 
    HasField "name" OpDef OpDef Source 
    HasField "outputArg" OpDef OpDef Source 
    HasField "summary" OpDef OpDef Source 
    type Field "allowsUninitializedInput" OpDef = Bool Source 
    type Field "attr" OpDef = [OpDef'AttrDef] Source 
    type Field "deprecation" OpDef = OpDeprecation Source 
    type Field "description" OpDef = Text Source 
    type Field "inputArg" OpDef = [OpDef'ArgDef] Source 
    type Field "isAggregate" OpDef = Bool Source 
    type Field "isCommutative" OpDef = Bool Source 
    type Field "isStateful" OpDef = Bool Source 
    type Field "maybe'deprecation" OpDef = Maybe OpDeprecation Source 
    type Field "name" OpDef = Text Source 
    type Field "outputArg" OpDef = [OpDef'ArgDef] Source 
    type Field "summary" OpDef = Text Source 

    data OpDef'ArgDef Source

    Instances

    Eq OpDef'ArgDef Source 
    Show OpDef'ArgDef Source 
    Default OpDef'ArgDef Source 
    Message OpDef'ArgDef Source 
    HasField "description" OpDef'ArgDef OpDef'ArgDef Source 
    HasField "isRef" OpDef'ArgDef OpDef'ArgDef Source 
    HasField "name" OpDef'ArgDef OpDef'ArgDef Source 
    HasField "numberAttr" OpDef'ArgDef OpDef'ArgDef Source 
    HasField "type'" OpDef'ArgDef OpDef'ArgDef Source 
    HasField "typeAttr" OpDef'ArgDef OpDef'ArgDef Source 
    HasField "typeListAttr" OpDef'ArgDef OpDef'ArgDef Source 
    type Field "description" OpDef'ArgDef = Text Source 
    type Field "isRef" OpDef'ArgDef = Bool Source 
    type Field "name" OpDef'ArgDef = Text Source 
    type Field "numberAttr" OpDef'ArgDef = Text Source 
    type Field "type'" OpDef'ArgDef = DataType Source 
    type Field "typeAttr" OpDef'ArgDef = Text Source 
    type Field "typeListAttr" OpDef'ArgDef = Text Source 

    data OpDef'AttrDef Source

    Instances

    Eq OpDef'AttrDef Source 
    Show OpDef'AttrDef Source 
    Default OpDef'AttrDef Source 
    Message OpDef'AttrDef Source 
    HasField "allowedValues" OpDef'AttrDef OpDef'AttrDef Source 
    HasField "defaultValue" OpDef'AttrDef OpDef'AttrDef Source 
    HasField "description" OpDef'AttrDef OpDef'AttrDef Source 
    HasField "hasMinimum" OpDef'AttrDef OpDef'AttrDef Source 
    HasField "maybe'allowedValues" OpDef'AttrDef OpDef'AttrDef Source 
    HasField "maybe'defaultValue" OpDef'AttrDef OpDef'AttrDef Source 
    HasField "minimum" OpDef'AttrDef OpDef'AttrDef Source 
    HasField "name" OpDef'AttrDef OpDef'AttrDef Source 
    HasField "type'" OpDef'AttrDef OpDef'AttrDef Source 
    type Field "allowedValues" OpDef'AttrDef = AttrValue Source 
    type Field "defaultValue" OpDef'AttrDef = AttrValue Source 
    type Field "description" OpDef'AttrDef = Text Source 
    type Field "hasMinimum" OpDef'AttrDef = Bool Source 
    type Field "maybe'allowedValues" OpDef'AttrDef = Maybe AttrValue Source 
    type Field "maybe'defaultValue" OpDef'AttrDef = Maybe AttrValue Source 
    type Field "minimum" OpDef'AttrDef = Int64 Source 
    type Field "name" OpDef'AttrDef = Text Source 
    type Field "type'" OpDef'AttrDef = Text Source 

    data OpList Source

    Constructors

    OpList 

    Fields

    _OpList'op :: [OpDef]
     

    Instances

    Eq OpList Source 
    Show OpList Source 
    Default OpList Source 
    Message OpList Source 
    HasField "op" OpList OpList Source 
    type Field "op" OpList = [OpDef] Source 

    allowedValues :: forall msg msg'. HasField "allowedValues" msg msg' => Lens msg msg' (Field "allowedValues" msg) (Field "allowedValues" msg') Source

    allowsUninitializedInput :: forall msg msg'. HasField "allowsUninitializedInput" msg msg' => Lens msg msg' (Field "allowsUninitializedInput" msg) (Field "allowsUninitializedInput" msg') Source

    attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') Source

    defaultValue :: forall msg msg'. HasField "defaultValue" msg msg' => Lens msg msg' (Field "defaultValue" msg) (Field "defaultValue" msg') Source

    deprecation :: forall msg msg'. HasField "deprecation" msg msg' => Lens msg msg' (Field "deprecation" msg) (Field "deprecation" msg') Source

    description :: forall msg msg'. HasField "description" msg msg' => Lens msg msg' (Field "description" msg) (Field "description" msg') Source

    explanation :: forall msg msg'. HasField "explanation" msg msg' => Lens msg msg' (Field "explanation" msg) (Field "explanation" msg') Source

    hasMinimum :: forall msg msg'. HasField "hasMinimum" msg msg' => Lens msg msg' (Field "hasMinimum" msg) (Field "hasMinimum" msg') Source

    inputArg :: forall msg msg'. HasField "inputArg" msg msg' => Lens msg msg' (Field "inputArg" msg) (Field "inputArg" msg') Source

    isAggregate :: forall msg msg'. HasField "isAggregate" msg msg' => Lens msg msg' (Field "isAggregate" msg) (Field "isAggregate" msg') Source

    isCommutative :: forall msg msg'. HasField "isCommutative" msg msg' => Lens msg msg' (Field "isCommutative" msg) (Field "isCommutative" msg') Source

    isRef :: forall msg msg'. HasField "isRef" msg msg' => Lens msg msg' (Field "isRef" msg) (Field "isRef" msg') Source

    isStateful :: forall msg msg'. HasField "isStateful" msg msg' => Lens msg msg' (Field "isStateful" msg) (Field "isStateful" msg') Source

    maybe'allowedValues :: forall msg msg'. HasField "maybe'allowedValues" msg msg' => Lens msg msg' (Field "maybe'allowedValues" msg) (Field "maybe'allowedValues" msg') Source

    maybe'defaultValue :: forall msg msg'. HasField "maybe'defaultValue" msg msg' => Lens msg msg' (Field "maybe'defaultValue" msg) (Field "maybe'defaultValue" msg') Source

    maybe'deprecation :: forall msg msg'. HasField "maybe'deprecation" msg msg' => Lens msg msg' (Field "maybe'deprecation" msg) (Field "maybe'deprecation" msg') Source

    minimum :: forall msg msg'. HasField "minimum" msg msg' => Lens msg msg' (Field "minimum" msg) (Field "minimum" msg') Source

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') Source

    numberAttr :: forall msg msg'. HasField "numberAttr" msg msg' => Lens msg msg' (Field "numberAttr" msg) (Field "numberAttr" msg') Source

    op :: forall msg msg'. HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg') Source

    outputArg :: forall msg msg'. HasField "outputArg" msg msg' => Lens msg msg' (Field "outputArg" msg) (Field "outputArg" msg') Source

    summary :: forall msg msg'. HasField "summary" msg msg' => Lens msg msg' (Field "summary" msg) (Field "summary" msg') Source

    type' :: forall msg msg'. HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg') Source

    typeAttr :: forall msg msg'. HasField "typeAttr" msg msg' => Lens msg msg' (Field "typeAttr" msg) (Field "typeAttr" msg') Source

    typeListAttr :: forall msg msg'. HasField "typeListAttr" msg msg' => Lens msg msg' (Field "typeListAttr" msg) (Field "typeListAttr" msg') Source

    version :: forall msg msg'. HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg') Source

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html
    new file mode 100644
    index 0000000..b529bd1
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html
    @@ -0,0 +1,4 @@
    +Proto.Tensorflow.Core.Framework.ResourceHandle

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.ResourceHandle

    Documentation

    container :: forall msg msg'. HasField "container" msg msg' => Lens msg msg' (Field "container" msg) (Field "container" msg') Source

    device :: forall msg msg'. HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg') Source

    hashCode :: forall msg msg'. HasField "hashCode" msg msg' => Lens msg msg' (Field "hashCode" msg) (Field "hashCode" msg') Source

    maybeTypeName :: forall msg msg'. HasField "maybeTypeName" msg msg' => Lens msg msg' (Field "maybeTypeName" msg) (Field "maybeTypeName" msg') Source

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') Source

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html
    new file mode 100644
    index 0000000..f0dcc5c
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html
    @@ -0,0 +1,4 @@
    +Proto.Tensorflow.Core.Framework.Tensor

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.Tensor

    Documentation

    data TensorProto Source

    Instances

    Eq TensorProto Source 
    Show TensorProto Source 
    Default TensorProto Source 
    Message TensorProto Source 
    HasField "boolVal" TensorProto TensorProto Source 
    HasField "dcomplexVal" TensorProto TensorProto Source 
    HasField "doubleVal" TensorProto TensorProto Source 
    HasField "dtype" TensorProto TensorProto Source 
    HasField "floatVal" TensorProto TensorProto Source 
    HasField "halfVal" TensorProto TensorProto Source 
    HasField "int64Val" TensorProto TensorProto Source 
    HasField "intVal" TensorProto TensorProto Source 
    HasField "maybe'tensorShape" TensorProto TensorProto Source 
    HasField "resourceHandleVal" TensorProto TensorProto Source 
    HasField "scomplexVal" TensorProto TensorProto Source 
    HasField "stringVal" TensorProto TensorProto Source 
    HasField "tensorContent" TensorProto TensorProto Source 
    HasField "tensorShape" TensorProto TensorProto Source 
    HasField "versionNumber" TensorProto TensorProto Source 
    type Field "boolVal" TensorProto = [Bool] Source 
    type Field "dcomplexVal" TensorProto = [Double] Source 
    type Field "doubleVal" TensorProto = [Double] Source 
    type Field "dtype" TensorProto = DataType Source 
    type Field "floatVal" TensorProto = [Float] Source 
    type Field "halfVal" TensorProto = [Int32] Source 
    type Field "int64Val" TensorProto = [Int64] Source 
    type Field "intVal" TensorProto = [Int32] Source 
    type Field "maybe'tensorShape" TensorProto = Maybe TensorShapeProto Source 
    type Field "resourceHandleVal" TensorProto = [ResourceHandle] Source 
    type Field "scomplexVal" TensorProto = [Float] Source 
    type Field "stringVal" TensorProto = [ByteString] Source 
    type Field "tensorContent" TensorProto = ByteString Source 
    type Field "tensorShape" TensorProto = TensorShapeProto Source 
    type Field "versionNumber" TensorProto = Int32 Source 

    boolVal :: forall msg msg'. HasField "boolVal" msg msg' => Lens msg msg' (Field "boolVal" msg) (Field "boolVal" msg') Source

    dcomplexVal :: forall msg msg'. HasField "dcomplexVal" msg msg' => Lens msg msg' (Field "dcomplexVal" msg) (Field "dcomplexVal" msg') Source

    doubleVal :: forall msg msg'. HasField "doubleVal" msg msg' => Lens msg msg' (Field "doubleVal" msg) (Field "doubleVal" msg') Source

    dtype :: forall msg msg'. HasField "dtype" msg msg' => Lens msg msg' (Field "dtype" msg) (Field "dtype" msg') Source

    floatVal :: forall msg msg'. HasField "floatVal" msg msg' => Lens msg msg' (Field "floatVal" msg) (Field "floatVal" msg') Source

    halfVal :: forall msg msg'. HasField "halfVal" msg msg' => Lens msg msg' (Field "halfVal" msg) (Field "halfVal" msg') Source

    int64Val :: forall msg msg'. HasField "int64Val" msg msg' => Lens msg msg' (Field "int64Val" msg) (Field "int64Val" msg') Source

    intVal :: forall msg msg'. HasField "intVal" msg msg' => Lens msg msg' (Field "intVal" msg) (Field "intVal" msg') Source

    maybe'tensorShape :: forall msg msg'. HasField "maybe'tensorShape" msg msg' => Lens msg msg' (Field "maybe'tensorShape" msg) (Field "maybe'tensorShape" msg') Source

    resourceHandleVal :: forall msg msg'. HasField "resourceHandleVal" msg msg' => Lens msg msg' (Field "resourceHandleVal" msg) (Field "resourceHandleVal" msg') Source

    scomplexVal :: forall msg msg'. HasField "scomplexVal" msg msg' => Lens msg msg' (Field "scomplexVal" msg) (Field "scomplexVal" msg') Source

    stringVal :: forall msg msg'. HasField "stringVal" msg msg' => Lens msg msg' (Field "stringVal" msg) (Field "stringVal" msg') Source

    tensorContent :: forall msg msg'. HasField "tensorContent" msg msg' => Lens msg msg' (Field "tensorContent" msg) (Field "tensorContent" msg') Source

    tensorShape :: forall msg msg'. HasField "tensorShape" msg msg' => Lens msg msg' (Field "tensorShape" msg) (Field "tensorShape" msg') Source

    versionNumber :: forall msg msg'. HasField "versionNumber" msg msg' => Lens msg msg' (Field "versionNumber" msg) (Field "versionNumber" msg') Source

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html
    new file mode 100644
    index 0000000..3db84a4
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html
    @@ -0,0 +1,4 @@
    +Proto.Tensorflow.Core.Framework.TensorShape

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.TensorShape

    Documentation

    dim :: forall msg msg'. HasField "dim" msg msg' => Lens msg msg' (Field "dim" msg) (Field "dim" msg') Source

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') Source

    size :: forall msg msg'. HasField "size" msg msg' => Lens msg msg' (Field "size" msg) (Field "size" msg') Source

    unknownRank :: forall msg msg'. HasField "unknownRank" msg msg' => Lens msg msg' (Field "unknownRank" msg) (Field "unknownRank" msg') Source
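    A small sketch of these lenses in use (illustrative, assuming proto-lens's def and the lens-family (&)/(.~) operators): each dimension of a TensorShapeProto is itself a TensorShapeProto'Dim built through the size and name lenses.

        import Data.Function ((&))
        import Data.ProtoLens (def)
        import Lens.Family2 ((.~))
        import Proto.Tensorflow.Core.Framework.TensorShape

        -- Illustrative: the shape [2, 3], two dims with explicit sizes.
        matrixShape :: TensorShapeProto
        matrixShape = def & dim .~ [ def & size .~ 2
                                   , def & size .~ 3 ]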

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html
    new file mode 100644
    index 0000000..39ff85a
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html
    @@ -0,0 +1,4 @@
    +Proto.Tensorflow.Core.Framework.Types

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html
    new file mode 100644
    index 0000000..c1f492f
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html
    @@ -0,0 +1,4 @@
    +Proto.Tensorflow.Core.Protobuf.Config

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Protobuf.Config

    Documentation

    data ConfigProto Source

    Instances

    Eq ConfigProto Source 
    Show ConfigProto Source 
    Default ConfigProto Source 
    Message ConfigProto Source 
    HasField "allowSoftPlacement" ConfigProto ConfigProto Source 
    HasField "deviceCount" ConfigProto ConfigProto Source 
    HasField "deviceFilters" ConfigProto ConfigProto Source 
    HasField "gpuOptions" ConfigProto ConfigProto Source 
    HasField "graphOptions" ConfigProto ConfigProto Source 
    HasField "interOpParallelismThreads" ConfigProto ConfigProto Source 
    HasField "intraOpParallelismThreads" ConfigProto ConfigProto Source 
    HasField "logDevicePlacement" ConfigProto ConfigProto Source 
    HasField "maybe'gpuOptions" ConfigProto ConfigProto Source 
    HasField "maybe'graphOptions" ConfigProto ConfigProto Source 
    HasField "operationTimeoutInMs" ConfigProto ConfigProto Source 
    HasField "placementPeriod" ConfigProto ConfigProto Source 
    HasField "sessionInterOpThreadPool" ConfigProto ConfigProto Source 
    HasField "usePerSessionThreads" ConfigProto ConfigProto Source 
    type Field "allowSoftPlacement" ConfigProto = Bool Source 
    type Field "deviceCount" ConfigProto = Map Text Int32 Source 
    type Field "deviceFilters" ConfigProto = [Text] Source 
    type Field "gpuOptions" ConfigProto = GPUOptions Source 
    type Field "graphOptions" ConfigProto = GraphOptions Source 
    type Field "interOpParallelismThreads" ConfigProto = Int32 Source 
    type Field "intraOpParallelismThreads" ConfigProto = Int32 Source 
    type Field "logDevicePlacement" ConfigProto = Bool Source 
    type Field "maybe'gpuOptions" ConfigProto = Maybe GPUOptions Source 
    type Field "maybe'graphOptions" ConfigProto = Maybe GraphOptions Source 
    type Field "operationTimeoutInMs" ConfigProto = Int64 Source 
    type Field "placementPeriod" ConfigProto = Int32 Source 
    type Field "sessionInterOpThreadPool" ConfigProto = [ThreadPoolOptionProto] Source 
    type Field "usePerSessionThreads" ConfigProto = Bool Source 

    data GPUOptions Source

    Instances

    Eq GPUOptions Source 
    Show GPUOptions Source 
    Default GPUOptions Source 
    Message GPUOptions Source 
    HasField "allocatorType" GPUOptions GPUOptions Source 
    HasField "allowGrowth" GPUOptions GPUOptions Source 
    HasField "deferredDeletionBytes" GPUOptions GPUOptions Source 
    HasField "perProcessGpuMemoryFraction" GPUOptions GPUOptions Source 
    HasField "visibleDeviceList" GPUOptions GPUOptions Source 
    type Field "allocatorType" GPUOptions = Text Source 
    type Field "allowGrowth" GPUOptions = Bool Source 
    type Field "deferredDeletionBytes" GPUOptions = Int64 Source 
    type Field "perProcessGpuMemoryFraction" GPUOptions = Double Source 
    type Field "visibleDeviceList" GPUOptions = Text Source 

    data GraphOptions Source

    Instances

    Eq GraphOptions Source 
    Show GraphOptions Source 
    Default GraphOptions Source 
    Message GraphOptions Source 
    HasField "buildCostModel" GraphOptions GraphOptions Source 
    HasField "buildCostModelAfter" GraphOptions GraphOptions Source 
    HasField "enableBfloat16Sendrecv" GraphOptions GraphOptions Source 
    HasField "enableRecvScheduling" GraphOptions GraphOptions Source 
    HasField "inferShapes" GraphOptions GraphOptions Source 
    HasField "maybe'optimizerOptions" GraphOptions GraphOptions Source 
    HasField "optimizerOptions" GraphOptions GraphOptions Source 
    HasField "placePrunedGraph" GraphOptions GraphOptions Source 
    HasField "timelineStep" GraphOptions GraphOptions Source 
    type Field "buildCostModel" GraphOptions = Int64 Source 
    type Field "buildCostModelAfter" GraphOptions = Int64 Source 
    type Field "enableBfloat16Sendrecv" GraphOptions = Bool Source 
    type Field "enableRecvScheduling" GraphOptions = Bool Source 
    type Field "inferShapes" GraphOptions = Bool Source 
    type Field "maybe'optimizerOptions" GraphOptions = Maybe OptimizerOptions Source 
    type Field "optimizerOptions" GraphOptions = OptimizerOptions Source 
    type Field "placePrunedGraph" GraphOptions = Bool Source 
    type Field "timelineStep" GraphOptions = Int32 Source 

    data RunMetadata Source

    Instances

    Eq RunMetadata Source 
    Show RunMetadata Source 
    Default RunMetadata Source 
    Message RunMetadata Source 
    HasField "costGraph" RunMetadata RunMetadata Source 
    HasField "maybe'costGraph" RunMetadata RunMetadata Source 
    HasField "maybe'stepStats" RunMetadata RunMetadata Source 
    HasField "partitionGraphs" RunMetadata RunMetadata Source 
    HasField "stepStats" RunMetadata RunMetadata Source 
    type Field "costGraph" RunMetadata Source 
    type Field "maybe'costGraph" RunMetadata Source 
    type Field "maybe'stepStats" RunMetadata Source 
    type Field "partitionGraphs" RunMetadata = [GraphDef] Source 
    type Field "stepStats" RunMetadata Source 

    data RunOptions Source

    Instances

    Eq RunOptions Source 
    Show RunOptions Source 
    Default RunOptions Source 
    Message RunOptions Source 
    HasField "debugTensorWatchOpts" RunOptions RunOptions Source 
    HasField "interOpThreadPool" RunOptions RunOptions Source 
    HasField "outputPartitionGraphs" RunOptions RunOptions Source 
    HasField "timeoutInMs" RunOptions RunOptions Source 
    HasField "traceLevel" RunOptions RunOptions Source 
    type Field "debugTensorWatchOpts" RunOptions = [DebugTensorWatch] Source 
    type Field "interOpThreadPool" RunOptions = Int32 Source 
    type Field "outputPartitionGraphs" RunOptions = Bool Source 
    type Field "timeoutInMs" RunOptions = Int64 Source 
    type Field "traceLevel" RunOptions = RunOptions'TraceLevel Source 

    allocatorType :: forall msg msg'. HasField "allocatorType" msg msg' => Lens msg msg' (Field "allocatorType" msg) (Field "allocatorType" msg') Source

    allowGrowth :: forall msg msg'. HasField "allowGrowth" msg msg' => Lens msg msg' (Field "allowGrowth" msg) (Field "allowGrowth" msg') Source

    allowSoftPlacement :: forall msg msg'. HasField "allowSoftPlacement" msg msg' => Lens msg msg' (Field "allowSoftPlacement" msg) (Field "allowSoftPlacement" msg') Source

    buildCostModel :: forall msg msg'. HasField "buildCostModel" msg msg' => Lens msg msg' (Field "buildCostModel" msg) (Field "buildCostModel" msg') Source

    buildCostModelAfter :: forall msg msg'. HasField "buildCostModelAfter" msg msg' => Lens msg msg' (Field "buildCostModelAfter" msg) (Field "buildCostModelAfter" msg') Source

    costGraph :: forall msg msg'. HasField "costGraph" msg msg' => Lens msg msg' (Field "costGraph" msg) (Field "costGraph" msg') Source

    debugOps :: forall msg msg'. HasField "debugOps" msg msg' => Lens msg msg' (Field "debugOps" msg) (Field "debugOps" msg') Source

    debugTensorWatchOpts :: forall msg msg'. HasField "debugTensorWatchOpts" msg msg' => Lens msg msg' (Field "debugTensorWatchOpts" msg) (Field "debugTensorWatchOpts" msg') Source

    debugUrls :: forall msg msg'. HasField "debugUrls" msg msg' => Lens msg msg' (Field "debugUrls" msg) (Field "debugUrls" msg') Source

    deferredDeletionBytes :: forall msg msg'. HasField "deferredDeletionBytes" msg msg' => Lens msg msg' (Field "deferredDeletionBytes" msg) (Field "deferredDeletionBytes" msg') Source

    deviceCount :: forall msg msg'. HasField "deviceCount" msg msg' => Lens msg msg' (Field "deviceCount" msg) (Field "deviceCount" msg') Source

    deviceFilters :: forall msg msg'. HasField "deviceFilters" msg msg' => Lens msg msg' (Field "deviceFilters" msg) (Field "deviceFilters" msg') Source

    doCommonSubexpressionElimination :: forall msg msg'. HasField "doCommonSubexpressionElimination" msg msg' => Lens msg msg' (Field "doCommonSubexpressionElimination" msg) (Field "doCommonSubexpressionElimination" msg') Source

    doConstantFolding :: forall msg msg'. HasField "doConstantFolding" msg msg' => Lens msg msg' (Field "doConstantFolding" msg) (Field "doConstantFolding" msg') Source

    doFunctionInlining :: forall msg msg'. HasField "doFunctionInlining" msg msg' => Lens msg msg' (Field "doFunctionInlining" msg) (Field "doFunctionInlining" msg') Source

    enableBfloat16Sendrecv :: forall msg msg'. HasField "enableBfloat16Sendrecv" msg msg' => Lens msg msg' (Field "enableBfloat16Sendrecv" msg) (Field "enableBfloat16Sendrecv" msg') Source

    enableRecvScheduling :: forall msg msg'. HasField "enableRecvScheduling" msg msg' => Lens msg msg' (Field "enableRecvScheduling" msg) (Field "enableRecvScheduling" msg') Source

    gpuOptions :: forall msg msg'. HasField "gpuOptions" msg msg' => Lens msg msg' (Field "gpuOptions" msg) (Field "gpuOptions" msg') Source

    graphOptions :: forall msg msg'. HasField "graphOptions" msg msg' => Lens msg msg' (Field "graphOptions" msg) (Field "graphOptions" msg') Source

    inferShapes :: forall msg msg'. HasField "inferShapes" msg msg' => Lens msg msg' (Field "inferShapes" msg) (Field "inferShapes" msg') Source

    interOpParallelismThreads :: forall msg msg'. HasField "interOpParallelismThreads" msg msg' => Lens msg msg' (Field "interOpParallelismThreads" msg) (Field "interOpParallelismThreads" msg') Source

    interOpThreadPool :: forall msg msg'. HasField "interOpThreadPool" msg msg' => Lens msg msg' (Field "interOpThreadPool" msg) (Field "interOpThreadPool" msg') Source

    intraOpParallelismThreads :: forall msg msg'. HasField "intraOpParallelismThreads" msg msg' => Lens msg msg' (Field "intraOpParallelismThreads" msg) (Field "intraOpParallelismThreads" msg') Source

    key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') Source

    logDevicePlacement :: forall msg msg'. HasField "logDevicePlacement" msg msg' => Lens msg msg' (Field "logDevicePlacement" msg) (Field "logDevicePlacement" msg') Source

    maybe'costGraph :: forall msg msg'. HasField "maybe'costGraph" msg msg' => Lens msg msg' (Field "maybe'costGraph" msg) (Field "maybe'costGraph" msg') Source

    maybe'gpuOptions :: forall msg msg'. HasField "maybe'gpuOptions" msg msg' => Lens msg msg' (Field "maybe'gpuOptions" msg) (Field "maybe'gpuOptions" msg') Source

    maybe'graphOptions :: forall msg msg'. HasField "maybe'graphOptions" msg msg' => Lens msg msg' (Field "maybe'graphOptions" msg) (Field "maybe'graphOptions" msg') Source

    maybe'optimizerOptions :: forall msg msg'. HasField "maybe'optimizerOptions" msg msg' => Lens msg msg' (Field "maybe'optimizerOptions" msg) (Field "maybe'optimizerOptions" msg') Source

    maybe'stepStats :: forall msg msg'. HasField "maybe'stepStats" msg msg' => Lens msg msg' (Field "maybe'stepStats" msg) (Field "maybe'stepStats" msg') Source

    nodeName :: forall msg msg'. HasField "nodeName" msg msg' => Lens msg msg' (Field "nodeName" msg) (Field "nodeName" msg') Source

    numThreads :: forall msg msg'. HasField "numThreads" msg msg' => Lens msg msg' (Field "numThreads" msg) (Field "numThreads" msg') Source

    operationTimeoutInMs :: forall msg msg'. HasField "operationTimeoutInMs" msg msg' => Lens msg msg' (Field "operationTimeoutInMs" msg) (Field "operationTimeoutInMs" msg') Source

    optLevel :: forall msg msg'. HasField "optLevel" msg msg' => Lens msg msg' (Field "optLevel" msg) (Field "optLevel" msg') Source

    optimizerOptions :: forall msg msg'. HasField "optimizerOptions" msg msg' => Lens msg msg' (Field "optimizerOptions" msg) (Field "optimizerOptions" msg') Source

    outputPartitionGraphs :: forall msg msg'. HasField "outputPartitionGraphs" msg msg' => Lens msg msg' (Field "outputPartitionGraphs" msg) (Field "outputPartitionGraphs" msg') Source

    outputSlot :: forall msg msg'. HasField "outputSlot" msg msg' => Lens msg msg' (Field "outputSlot" msg) (Field "outputSlot" msg') Source

    partitionGraphs :: forall msg msg'. HasField "partitionGraphs" msg msg' => Lens msg msg' (Field "partitionGraphs" msg) (Field "partitionGraphs" msg') Source

    perProcessGpuMemoryFraction :: forall msg msg'. HasField "perProcessGpuMemoryFraction" msg msg' => Lens msg msg' (Field "perProcessGpuMemoryFraction" msg) (Field "perProcessGpuMemoryFraction" msg') Source

    placePrunedGraph :: forall msg msg'. HasField "placePrunedGraph" msg msg' => Lens msg msg' (Field "placePrunedGraph" msg) (Field "placePrunedGraph" msg') Source

    placementPeriod :: forall msg msg'. HasField "placementPeriod" msg msg' => Lens msg msg' (Field "placementPeriod" msg) (Field "placementPeriod" msg') Source

    sessionInterOpThreadPool :: forall msg msg'. HasField "sessionInterOpThreadPool" msg msg' => Lens msg msg' (Field "sessionInterOpThreadPool" msg) (Field "sessionInterOpThreadPool" msg') Source

    stepStats :: forall msg msg'. HasField "stepStats" msg msg' => Lens msg msg' (Field "stepStats" msg) (Field "stepStats" msg') Source

    timelineStep :: forall msg msg'. HasField "timelineStep" msg msg' => Lens msg msg' (Field "timelineStep" msg) (Field "timelineStep" msg') Source

    timeoutInMs :: forall msg msg'. HasField "timeoutInMs" msg msg' => Lens msg msg' (Field "timeoutInMs" msg) (Field "timeoutInMs" msg') Source

    traceLevel :: forall msg msg'. HasField "traceLevel" msg msg' => Lens msg msg' (Field "traceLevel" msg) (Field "traceLevel" msg') Source

    usePerSessionThreads :: forall msg msg'. HasField "usePerSessionThreads" msg msg' => Lens msg msg' (Field "usePerSessionThreads" msg) (Field "usePerSessionThreads" msg') Source

    value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') Source

    visibleDeviceList :: forall msg msg'. HasField "visibleDeviceList" msg msg' => Lens msg msg' (Field "visibleDeviceList" msg) (Field "visibleDeviceList" msg') Source
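    Because these are ordinary van Laarhoven lenses, nested fields are reached by composing them with (.). A hedged sketch, same imports as above:

        -- Set a nested GPU flag directly on a ConfigProto through a composed lens.
        softGrowth :: ConfigProto
        softGrowth = def & allowSoftPlacement .~ True
                         & gpuOptions . allowGrowth .~ True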

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html
    new file mode 100644
    index 0000000..141d60d
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - _)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index - _

    _AttrValue'b  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'f  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'func  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'i  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'list  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'b  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'f  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'i  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue's  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'shape  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'tensor  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'type'  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'placeholder  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue's  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'shape  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'tensor  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'type'  Proto.Tensorflow.Core.Framework.AttrValue
    _ConfigProto'allowSoftPlacement  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceCount  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'key  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'value  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceFilters  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'gpuOptions  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'graphOptions  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'interOpParallelismThreads  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'intraOpParallelismThreads  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'logDevicePlacement  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'operationTimeoutInMs  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'placementPeriod  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'sessionInterOpThreadPool  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'usePerSessionThreads  Proto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'debugOps  Proto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'debugUrls  Proto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'nodeName  Proto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'outputSlot  Proto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'allocatorType  Proto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'allowGrowth  Proto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'deferredDeletionBytes  Proto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'perProcessGpuMemoryFraction  Proto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'visibleDeviceList  Proto.Tensorflow.Core.Protobuf.Config
    _GraphDef'library  Proto.Tensorflow.Core.Framework.Graph
    _GraphDef'node  Proto.Tensorflow.Core.Framework.Graph
    _GraphDef'version  Proto.Tensorflow.Core.Framework.Graph
    _GraphDef'versions  Proto.Tensorflow.Core.Framework.Graph
    _GraphOptions'buildCostModel  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'buildCostModelAfter  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableBfloat16Sendrecv  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableRecvScheduling  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'inferShapes  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'optimizerOptions  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'placePrunedGraph  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'timelineStep  Proto.Tensorflow.Core.Protobuf.Config
    _NameAttrList'attr  Proto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'key  Proto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'value  Proto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'name  Proto.Tensorflow.Core.Framework.AttrValue
    _NodeDef'attr  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'key  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'value  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'device  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'input  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'name  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'op  Proto.Tensorflow.Core.Framework.NodeDef
    _OpDef'allowsUninitializedInput  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'description  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'isRef  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'name  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'numberAttr  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'type'  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeAttr  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeListAttr  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'attr  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'allowedValues  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'defaultValue  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'description  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'hasMinimum  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'minimum  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'name  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'type'  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'deprecation  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'description  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'inputArg  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'isAggregate  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'isCommutative  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'isStateful  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'name  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'outputArg  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'summary  Proto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'explanation  Proto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'version  Proto.Tensorflow.Core.Framework.OpDef
    _OpList'op  Proto.Tensorflow.Core.Framework.OpDef
    _OptimizerOptions'doCommonSubexpressionElimination  Proto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doConstantFolding  Proto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doFunctionInlining  Proto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'optLevel  Proto.Tensorflow.Core.Protobuf.Config
    _ResourceHandle'container  Proto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'device  Proto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'hashCode  Proto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'maybeTypeName  Proto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'name  Proto.Tensorflow.Core.Framework.ResourceHandle
    _RunMetadata'costGraph  Proto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'partitionGraphs  Proto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'stepStats  Proto.Tensorflow.Core.Protobuf.Config
    _RunOptions'debugTensorWatchOpts  Proto.Tensorflow.Core.Protobuf.Config
    _RunOptions'interOpThreadPool  Proto.Tensorflow.Core.Protobuf.Config
    _RunOptions'outputPartitionGraphs  Proto.Tensorflow.Core.Protobuf.Config
    _RunOptions'timeoutInMs  Proto.Tensorflow.Core.Protobuf.Config
    _RunOptions'traceLevel  Proto.Tensorflow.Core.Protobuf.Config
    _TensorProto'boolVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dcomplexVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'doubleVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dtype  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'floatVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'halfVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'int64Val  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'intVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'resourceHandleVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'scomplexVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'stringVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorContent  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorShape  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'versionNumber  Proto.Tensorflow.Core.Framework.Tensor
    _TensorShapeProto'dim  Proto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'name  Proto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'size  Proto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'unknownRank  Proto.Tensorflow.Core.Framework.TensorShape
    _ThreadPoolOptionProto'numThreads  Proto.Tensorflow.Core.Protobuf.Config
    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html
    new file mode 100644
    index 0000000..ee12573
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - A)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html
    new file mode 100644
    index 0000000..2fa311b
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index

    allocatorType  Proto.Tensorflow.Core.Protobuf.Config
    allowedValues  Proto.Tensorflow.Core.Framework.OpDef
    allowGrowth  Proto.Tensorflow.Core.Protobuf.Config
    allowSoftPlacement  Proto.Tensorflow.Core.Protobuf.Config
    allowsUninitializedInput  Proto.Tensorflow.Core.Framework.OpDef
    attr
      1 (Function)  Proto.Tensorflow.Core.Framework.AttrValue
      2 (Function)  Proto.Tensorflow.Core.Framework.NodeDef
      3 (Function)  Proto.Tensorflow.Core.Framework.OpDef
    AttrValue
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.AttrValue
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.AttrValue
    AttrValue'ListValue
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.AttrValue
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.AttrValue
    b  Proto.Tensorflow.Core.Framework.AttrValue
    boolVal  Proto.Tensorflow.Core.Framework.Tensor
    buildCostModel  Proto.Tensorflow.Core.Protobuf.Config
    buildCostModelAfter  Proto.Tensorflow.Core.Protobuf.Config
    ConfigProto
      1 (Type/Class)  Proto.Tensorflow.Core.Protobuf.Config
      2 (Data Constructor)  Proto.Tensorflow.Core.Protobuf.Config
    ConfigProto'DeviceCountEntry
      1 (Type/Class)  Proto.Tensorflow.Core.Protobuf.Config
      2 (Data Constructor)  Proto.Tensorflow.Core.Protobuf.Config
    container  Proto.Tensorflow.Core.Framework.ResourceHandle
    costGraph  Proto.Tensorflow.Core.Protobuf.Config
    DataType  Proto.Tensorflow.Core.Framework.Types
    dcomplexVal  Proto.Tensorflow.Core.Framework.Tensor
    debugOps  Proto.Tensorflow.Core.Protobuf.Config
    DebugTensorWatch
      1 (Type/Class)  Proto.Tensorflow.Core.Protobuf.Config
      2 (Data Constructor)  Proto.Tensorflow.Core.Protobuf.Config
    debugTensorWatchOpts  Proto.Tensorflow.Core.Protobuf.Config
    debugUrls  Proto.Tensorflow.Core.Protobuf.Config
    defaultValue  Proto.Tensorflow.Core.Framework.OpDef
    deferredDeletionBytes  Proto.Tensorflow.Core.Protobuf.Config
    deprecation  Proto.Tensorflow.Core.Framework.OpDef
    description  Proto.Tensorflow.Core.Framework.OpDef
    device
      1 (Function)  Proto.Tensorflow.Core.Framework.ResourceHandle
      2 (Function)  Proto.Tensorflow.Core.Framework.NodeDef
    deviceCount  Proto.Tensorflow.Core.Protobuf.Config
    deviceFilters  Proto.Tensorflow.Core.Protobuf.Config
    dim  Proto.Tensorflow.Core.Framework.TensorShape
    doCommonSubexpressionElimination  Proto.Tensorflow.Core.Protobuf.Config
    doConstantFolding  Proto.Tensorflow.Core.Protobuf.Config
    doFunctionInlining  Proto.Tensorflow.Core.Protobuf.Config
    doubleVal  Proto.Tensorflow.Core.Framework.Tensor
    dtype  Proto.Tensorflow.Core.Framework.Tensor
    DT_BFLOAT16  Proto.Tensorflow.Core.Framework.Types
    DT_BFLOAT16_REF  Proto.Tensorflow.Core.Framework.Types
    DT_BOOL  Proto.Tensorflow.Core.Framework.Types
    DT_BOOL_REF  Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128  Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128_REF  Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64  Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64_REF  Proto.Tensorflow.Core.Framework.Types
    DT_DOUBLE  Proto.Tensorflow.Core.Framework.Types
    DT_DOUBLE_REF  Proto.Tensorflow.Core.Framework.Types
    DT_FLOAT  Proto.Tensorflow.Core.Framework.Types
    DT_FLOAT_REF  Proto.Tensorflow.Core.Framework.Types
    DT_HALF  Proto.Tensorflow.Core.Framework.Types
    DT_HALF_REF  Proto.Tensorflow.Core.Framework.Types
    DT_INT16  Proto.Tensorflow.Core.Framework.Types
    DT_INT16_REF  Proto.Tensorflow.Core.Framework.Types
    DT_INT32  Proto.Tensorflow.Core.Framework.Types
    DT_INT32_REF  Proto.Tensorflow.Core.Framework.Types
    DT_INT64  Proto.Tensorflow.Core.Framework.Types
    DT_INT64_REF  Proto.Tensorflow.Core.Framework.Types
    DT_INT8  Proto.Tensorflow.Core.Framework.Types
    DT_INT8_REF  Proto.Tensorflow.Core.Framework.Types
    DT_INVALID  Proto.Tensorflow.Core.Framework.Types
    DT_QINT16  Proto.Tensorflow.Core.Framework.Types
    DT_QINT16_REF  Proto.Tensorflow.Core.Framework.Types
    DT_QINT32  Proto.Tensorflow.Core.Framework.Types
    DT_QINT32_REF  Proto.Tensorflow.Core.Framework.Types
    DT_QINT8  Proto.Tensorflow.Core.Framework.Types
    DT_QINT8_REF  Proto.Tensorflow.Core.Framework.Types
    DT_QUINT16  Proto.Tensorflow.Core.Framework.Types
    DT_QUINT16_REF  Proto.Tensorflow.Core.Framework.Types
    DT_QUINT8  Proto.Tensorflow.Core.Framework.Types
    DT_QUINT8_REF  Proto.Tensorflow.Core.Framework.Types
    DT_RESOURCE  Proto.Tensorflow.Core.Framework.Types
    DT_RESOURCE_REF  Proto.Tensorflow.Core.Framework.Types
    DT_STRING  Proto.Tensorflow.Core.Framework.Types
    DT_STRING_REF  Proto.Tensorflow.Core.Framework.Types
    DT_UINT16  Proto.Tensorflow.Core.Framework.Types
    DT_UINT16_REF  Proto.Tensorflow.Core.Framework.Types
    DT_UINT8  Proto.Tensorflow.Core.Framework.Types
    DT_UINT8_REF  Proto.Tensorflow.Core.Framework.Types
    enableBfloat16Sendrecv  Proto.Tensorflow.Core.Protobuf.Config
    enableRecvScheduling  Proto.Tensorflow.Core.Protobuf.Config
    explanation  Proto.Tensorflow.Core.Framework.OpDef
    f  Proto.Tensorflow.Core.Framework.AttrValue
    floatVal  Proto.Tensorflow.Core.Framework.Tensor
    func  Proto.Tensorflow.Core.Framework.AttrValue
    GPUOptions
      1 (Type/Class)  Proto.Tensorflow.Core.Protobuf.Config
      2 (Data Constructor)  Proto.Tensorflow.Core.Protobuf.Config
    gpuOptions  Proto.Tensorflow.Core.Protobuf.Config
    GraphDef
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.Graph
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.Graph
    GraphOptions
      1 (Type/Class)  Proto.Tensorflow.Core.Protobuf.Config
      2 (Data Constructor)  Proto.Tensorflow.Core.Protobuf.Config
    graphOptions  Proto.Tensorflow.Core.Protobuf.Config
    halfVal  Proto.Tensorflow.Core.Framework.Tensor
    hashCode  Proto.Tensorflow.Core.Framework.ResourceHandle
    hasMinimum  Proto.Tensorflow.Core.Framework.OpDef
    i  Proto.Tensorflow.Core.Framework.AttrValue
    inferShapes  Proto.Tensorflow.Core.Protobuf.Config
    input  Proto.Tensorflow.Core.Framework.NodeDef
    inputArg  Proto.Tensorflow.Core.Framework.OpDef
    int64Val  Proto.Tensorflow.Core.Framework.Tensor
    interOpParallelismThreads  Proto.Tensorflow.Core.Protobuf.Config
    interOpThreadPool  Proto.Tensorflow.Core.Protobuf.Config
    intraOpParallelismThreads  Proto.Tensorflow.Core.Protobuf.Config
    intVal  Proto.Tensorflow.Core.Framework.Tensor
    isAggregate  Proto.Tensorflow.Core.Framework.OpDef
    isCommutative  Proto.Tensorflow.Core.Framework.OpDef
    isRef  Proto.Tensorflow.Core.Framework.OpDef
    isStateful  Proto.Tensorflow.Core.Framework.OpDef
    key
      1 (Function)  Proto.Tensorflow.Core.Framework.AttrValue
      2 (Function)  Proto.Tensorflow.Core.Framework.NodeDef
      3 (Function)  Proto.Tensorflow.Core.Protobuf.Config
    library  Proto.Tensorflow.Core.Framework.Graph
    list  Proto.Tensorflow.Core.Framework.AttrValue
    logDevicePlacement  Proto.Tensorflow.Core.Protobuf.Config
    maybe'allowedValues  Proto.Tensorflow.Core.Framework.OpDef
    maybe'b  Proto.Tensorflow.Core.Framework.AttrValue
    maybe'costGraph  Proto.Tensorflow.Core.Protobuf.Config
    maybe'defaultValue  Proto.Tensorflow.Core.Framework.OpDef
    maybe'deprecation  Proto.Tensorflow.Core.Framework.OpDef
    maybe'f  Proto.Tensorflow.Core.Framework.AttrValue
    maybe'func  Proto.Tensorflow.Core.Framework.AttrValue
    maybe'gpuOptions  Proto.Tensorflow.Core.Protobuf.Config
    maybe'graphOptions  Proto.Tensorflow.Core.Protobuf.Config
    maybe'i  Proto.Tensorflow.Core.Framework.AttrValue
    maybe'library  Proto.Tensorflow.Core.Framework.Graph
    maybe'list  Proto.Tensorflow.Core.Framework.AttrValue
    maybe'optimizerOptions  Proto.Tensorflow.Core.Protobuf.Config
    maybe'placeholder  Proto.Tensorflow.Core.Framework.AttrValue
    maybe's  Proto.Tensorflow.Core.Framework.AttrValue
    maybe'shape  Proto.Tensorflow.Core.Framework.AttrValue
    maybe'stepStats  Proto.Tensorflow.Core.Protobuf.Config
    maybe'tensor  Proto.Tensorflow.Core.Framework.AttrValue
    maybe'tensorShape  Proto.Tensorflow.Core.Framework.Tensor
    maybe'type'  Proto.Tensorflow.Core.Framework.AttrValue
    maybe'value
      1 (Function)  Proto.Tensorflow.Core.Framework.AttrValue
      2 (Function)  Proto.Tensorflow.Core.Framework.NodeDef
    maybe'versions  Proto.Tensorflow.Core.Framework.Graph
    maybeTypeName  Proto.Tensorflow.Core.Framework.ResourceHandle
    minimum  Proto.Tensorflow.Core.Framework.OpDef
    name
      1 (Function)  Proto.Tensorflow.Core.Framework.ResourceHandle
      2 (Function)  Proto.Tensorflow.Core.Framework.TensorShape
      3 (Function)  Proto.Tensorflow.Core.Framework.AttrValue
      4 (Function)  Proto.Tensorflow.Core.Framework.NodeDef
      5 (Function)  Proto.Tensorflow.Core.Framework.OpDef
    NameAttrList
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.AttrValue
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.AttrValue
    NameAttrList'AttrEntry
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.AttrValue
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.AttrValue
    node  Proto.Tensorflow.Core.Framework.Graph
    NodeDef
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.NodeDef
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.NodeDef
    NodeDef'AttrEntry
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.NodeDef
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.NodeDef
    nodeName  Proto.Tensorflow.Core.Protobuf.Config
    numberAttr  Proto.Tensorflow.Core.Framework.OpDef
    numThreads  Proto.Tensorflow.Core.Protobuf.Config
    op
      1 (Function)  Proto.Tensorflow.Core.Framework.NodeDef
      2 (Function)  Proto.Tensorflow.Core.Framework.OpDef
    OpDef
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.OpDef
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.OpDef
    OpDef'ArgDef
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.OpDef
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.OpDef
    OpDef'AttrDef
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.OpDef
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.OpDef
    OpDeprecation
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.OpDef
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.OpDef
    operationTimeoutInMs  Proto.Tensorflow.Core.Protobuf.Config
    OpList
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.OpDef
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.OpDef
    OptimizerOptions
      1 (Type/Class)  Proto.Tensorflow.Core.Protobuf.Config
      2 (Data Constructor)  Proto.Tensorflow.Core.Protobuf.Config
    optimizerOptions  Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'L0  Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'L1  Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'Level  Proto.Tensorflow.Core.Protobuf.Config
    optLevel  Proto.Tensorflow.Core.Protobuf.Config
    outputArg  Proto.Tensorflow.Core.Framework.OpDef
    outputPartitionGraphs  Proto.Tensorflow.Core.Protobuf.Config
    outputSlot  Proto.Tensorflow.Core.Protobuf.Config
    partitionGraphs  Proto.Tensorflow.Core.Protobuf.Config
    perProcessGpuMemoryFraction  Proto.Tensorflow.Core.Protobuf.Config
    placeholder  Proto.Tensorflow.Core.Framework.AttrValue
    placementPeriod  Proto.Tensorflow.Core.Protobuf.Config
    placePrunedGraph  Proto.Tensorflow.Core.Protobuf.Config
    ResourceHandle
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.ResourceHandle
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.ResourceHandle
    resourceHandleVal  Proto.Tensorflow.Core.Framework.Tensor
    RunMetadata
      1 (Type/Class)  Proto.Tensorflow.Core.Protobuf.Config
      2 (Data Constructor)  Proto.Tensorflow.Core.Protobuf.Config
    RunOptions
      1 (Type/Class)  Proto.Tensorflow.Core.Protobuf.Config
      2 (Data Constructor)  Proto.Tensorflow.Core.Protobuf.Config
    RunOptions'FULL_TRACE  Proto.Tensorflow.Core.Protobuf.Config
    RunOptions'HARDWARE_TRACE  Proto.Tensorflow.Core.Protobuf.Config
    RunOptions'NO_TRACE  Proto.Tensorflow.Core.Protobuf.Config
    RunOptions'SOFTWARE_TRACE  Proto.Tensorflow.Core.Protobuf.Config
    RunOptions'TraceLevel  Proto.Tensorflow.Core.Protobuf.Config
    s  Proto.Tensorflow.Core.Framework.AttrValue
    scomplexVal  Proto.Tensorflow.Core.Framework.Tensor
    sessionInterOpThreadPool  Proto.Tensorflow.Core.Protobuf.Config
    shape  Proto.Tensorflow.Core.Framework.AttrValue
    size  Proto.Tensorflow.Core.Framework.TensorShape
    stepStats  Proto.Tensorflow.Core.Protobuf.Config
    stringVal  Proto.Tensorflow.Core.Framework.Tensor
    summary  Proto.Tensorflow.Core.Framework.OpDef
    tensor  Proto.Tensorflow.Core.Framework.AttrValue
    tensorContent  Proto.Tensorflow.Core.Framework.Tensor
    TensorProto
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.Tensor
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.Tensor
    tensorShape  Proto.Tensorflow.Core.Framework.Tensor
    TensorShapeProto
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.TensorShape
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.TensorShape
    TensorShapeProto'Dim
      1 (Type/Class)  Proto.Tensorflow.Core.Framework.TensorShape
      2 (Data Constructor)  Proto.Tensorflow.Core.Framework.TensorShape
    ThreadPoolOptionProto
      1 (Type/Class)  Proto.Tensorflow.Core.Protobuf.Config
      2 (Data Constructor)  Proto.Tensorflow.Core.Protobuf.Config
    timelineStep  Proto.Tensorflow.Core.Protobuf.Config
    timeoutInMs  Proto.Tensorflow.Core.Protobuf.Config
    traceLevel  Proto.Tensorflow.Core.Protobuf.Config
    type'
      1 (Function)  Proto.Tensorflow.Core.Framework.AttrValue
      2 (Function)  Proto.Tensorflow.Core.Framework.OpDef
    typeAttr  Proto.Tensorflow.Core.Framework.OpDef
    typeListAttr  Proto.Tensorflow.Core.Framework.OpDef
    unknownRank  Proto.Tensorflow.Core.Framework.TensorShape
    usePerSessionThreads  Proto.Tensorflow.Core.Protobuf.Config
    value
      1 (Function)  Proto.Tensorflow.Core.Framework.AttrValue
      2 (Function)  Proto.Tensorflow.Core.Framework.NodeDef
      3 (Function)  Proto.Tensorflow.Core.Protobuf.Config
    version
      1 (Function)  Proto.Tensorflow.Core.Framework.OpDef
      2 (Function)  Proto.Tensorflow.Core.Framework.Graph
    versionNumber  Proto.Tensorflow.Core.Framework.Tensor
    versions  Proto.Tensorflow.Core.Framework.Graph
    visibleDeviceList  Proto.Tensorflow.Core.Protobuf.Config
    _AttrValue'b  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'f  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'func  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'i  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'list  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'b  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'f  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'i  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue's  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'shape  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'tensor  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'type'  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'placeholder  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue's  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'shape  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'tensor  Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'type'  Proto.Tensorflow.Core.Framework.AttrValue
    _ConfigProto'allowSoftPlacement  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceCount  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'key  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'value  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceFilters  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'gpuOptions  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'graphOptions  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'interOpParallelismThreads  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'intraOpParallelismThreads  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'logDevicePlacement  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'operationTimeoutInMs  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'placementPeriod  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'sessionInterOpThreadPool  Proto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'usePerSessionThreads  Proto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'debugOps  Proto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'debugUrls  Proto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'nodeName  Proto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'outputSlot  Proto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'allocatorType  Proto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'allowGrowth  Proto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'deferredDeletionBytes  Proto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'perProcessGpuMemoryFraction  Proto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'visibleDeviceList  Proto.Tensorflow.Core.Protobuf.Config
    _GraphDef'library  Proto.Tensorflow.Core.Framework.Graph
    _GraphDef'node  Proto.Tensorflow.Core.Framework.Graph
    _GraphDef'version  Proto.Tensorflow.Core.Framework.Graph
    _GraphDef'versions  Proto.Tensorflow.Core.Framework.Graph
    _GraphOptions'buildCostModel  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'buildCostModelAfter  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableBfloat16Sendrecv  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableRecvScheduling  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'inferShapes  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'optimizerOptions  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'placePrunedGraph  Proto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'timelineStep  Proto.Tensorflow.Core.Protobuf.Config
    _NameAttrList'attr  Proto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'key  Proto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'value  Proto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'name  Proto.Tensorflow.Core.Framework.AttrValue
    _NodeDef'attr  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'key  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'value  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'device  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'input  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'name  Proto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'op  Proto.Tensorflow.Core.Framework.NodeDef
    _OpDef'allowsUninitializedInput  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'description  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'isRef  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'name  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'numberAttr  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'type'  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeAttr  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeListAttr  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'attr  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'allowedValues  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'defaultValue  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'description  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'hasMinimum  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'minimum  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'name  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'type'  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'deprecation  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'description  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'inputArg  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'isAggregate  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'isCommutative  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'isStateful  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'name  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'outputArg  Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'summary  Proto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'explanation  Proto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'version  Proto.Tensorflow.Core.Framework.OpDef
    _OpList'op  Proto.Tensorflow.Core.Framework.OpDef
    _OptimizerOptions'doCommonSubexpressionElimination  Proto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doConstantFolding  Proto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doFunctionInlining  Proto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'optLevel  Proto.Tensorflow.Core.Protobuf.Config
    _ResourceHandle'container  Proto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'device  Proto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'hashCode  Proto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'maybeTypeName  Proto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'name  Proto.Tensorflow.Core.Framework.ResourceHandle
    _RunMetadata'costGraph  Proto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'partitionGraphs  Proto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'stepStats  Proto.Tensorflow.Core.Protobuf.Config
    _RunOptions'debugTensorWatchOpts  Proto.Tensorflow.Core.Protobuf.Config
    _RunOptions'interOpThreadPool  Proto.Tensorflow.Core.Protobuf.Config
    _RunOptions'outputPartitionGraphs  Proto.Tensorflow.Core.Protobuf.Config
    _RunOptions'timeoutInMs  Proto.Tensorflow.Core.Protobuf.Config
    _RunOptions'traceLevel  Proto.Tensorflow.Core.Protobuf.Config
    _TensorProto'boolVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dcomplexVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'doubleVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dtype  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'floatVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'halfVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'int64Val  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'intVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'resourceHandleVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'scomplexVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'stringVal  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorContent  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorShape  Proto.Tensorflow.Core.Framework.Tensor
    _TensorProto'versionNumber  Proto.Tensorflow.Core.Framework.Tensor
    _TensorShapeProto'dim  Proto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'name  Proto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'size  Proto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'unknownRank  Proto.Tensorflow.Core.Framework.TensorShape
    _ThreadPoolOptionProto'numThreads  Proto.Tensorflow.Core.Protobuf.Config
    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html
    new file mode 100644
    index 0000000..ae55437
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - B)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html
    new file mode 100644
    index 0000000..f072b65
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - C)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html
    new file mode 100644
    index 0000000..9d5266e
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - D)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index - D

    DataType  Proto.Tensorflow.Core.Framework.Types
    dcomplexVal  Proto.Tensorflow.Core.Framework.Tensor
    debugOps  Proto.Tensorflow.Core.Protobuf.Config
    DebugTensorWatch
      1 (Type/Class)  Proto.Tensorflow.Core.Protobuf.Config
      2 (Data Constructor)  Proto.Tensorflow.Core.Protobuf.Config
    debugTensorWatchOpts  Proto.Tensorflow.Core.Protobuf.Config
    debugUrls  Proto.Tensorflow.Core.Protobuf.Config
    defaultValue  Proto.Tensorflow.Core.Framework.OpDef
    deferredDeletionBytes  Proto.Tensorflow.Core.Protobuf.Config
    deprecation  Proto.Tensorflow.Core.Framework.OpDef
    description  Proto.Tensorflow.Core.Framework.OpDef
    device
      1 (Function)  Proto.Tensorflow.Core.Framework.ResourceHandle
      2 (Function)  Proto.Tensorflow.Core.Framework.NodeDef
    deviceCount  Proto.Tensorflow.Core.Protobuf.Config
    deviceFilters  Proto.Tensorflow.Core.Protobuf.Config
    dim  Proto.Tensorflow.Core.Framework.TensorShape
    doCommonSubexpressionElimination  Proto.Tensorflow.Core.Protobuf.Config
    doConstantFolding  Proto.Tensorflow.Core.Protobuf.Config
    doFunctionInlining  Proto.Tensorflow.Core.Protobuf.Config
    doubleVal  Proto.Tensorflow.Core.Framework.Tensor
    dtype  Proto.Tensorflow.Core.Framework.Tensor
    DT_BFLOAT16  Proto.Tensorflow.Core.Framework.Types
    DT_BFLOAT16_REF  Proto.Tensorflow.Core.Framework.Types
    DT_BOOL  Proto.Tensorflow.Core.Framework.Types
    DT_BOOL_REF  Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128  Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128_REF  Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64  Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64_REF  Proto.Tensorflow.Core.Framework.Types
    DT_DOUBLE  Proto.Tensorflow.Core.Framework.Types
    DT_DOUBLE_REF  Proto.Tensorflow.Core.Framework.Types
    DT_FLOAT  Proto.Tensorflow.Core.Framework.Types
    DT_FLOAT_REF  Proto.Tensorflow.Core.Framework.Types
    DT_HALF  Proto.Tensorflow.Core.Framework.Types
    DT_HALF_REF  Proto.Tensorflow.Core.Framework.Types
    DT_INT16  Proto.Tensorflow.Core.Framework.Types
    DT_INT16_REF  Proto.Tensorflow.Core.Framework.Types
    DT_INT32  Proto.Tensorflow.Core.Framework.Types
    DT_INT32_REF  Proto.Tensorflow.Core.Framework.Types
    DT_INT64  Proto.Tensorflow.Core.Framework.Types
    DT_INT64_REF  Proto.Tensorflow.Core.Framework.Types
    DT_INT8  Proto.Tensorflow.Core.Framework.Types
    DT_INT8_REF  Proto.Tensorflow.Core.Framework.Types
    DT_INVALID  Proto.Tensorflow.Core.Framework.Types
    DT_QINT16  Proto.Tensorflow.Core.Framework.Types
    DT_QINT16_REF  Proto.Tensorflow.Core.Framework.Types
    DT_QINT32  Proto.Tensorflow.Core.Framework.Types
    DT_QINT32_REF  Proto.Tensorflow.Core.Framework.Types
    DT_QINT8  Proto.Tensorflow.Core.Framework.Types
    DT_QINT8_REF  Proto.Tensorflow.Core.Framework.Types
    DT_QUINT16  Proto.Tensorflow.Core.Framework.Types
    DT_QUINT16_REF  Proto.Tensorflow.Core.Framework.Types
    DT_QUINT8  Proto.Tensorflow.Core.Framework.Types
    DT_QUINT8_REF  Proto.Tensorflow.Core.Framework.Types
    DT_RESOURCE  Proto.Tensorflow.Core.Framework.Types
    DT_RESOURCE_REF  Proto.Tensorflow.Core.Framework.Types
    DT_STRING  Proto.Tensorflow.Core.Framework.Types
    DT_STRING_REF  Proto.Tensorflow.Core.Framework.Types
    DT_UINT16  Proto.Tensorflow.Core.Framework.Types
    DT_UINT16_REF  Proto.Tensorflow.Core.Framework.Types
    DT_UINT8  Proto.Tensorflow.Core.Framework.Types
    DT_UINT8_REF  Proto.Tensorflow.Core.Framework.Types
    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html
    new file mode 100644
    index 0000000..75c8281
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - E)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html
    new file mode 100644
    index 0000000..38b9899
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - F)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html
    new file mode 100644
    index 0000000..48b269f
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - G)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html
    new file mode 100644
    index 0000000..e71b627
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - H)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html
    new file mode 100644
    index 0000000..a29b263
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - I)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html
    new file mode 100644
    index 0000000..8a70b07
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - K)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html
    new file mode 100644
    index 0000000..e9e6c5c
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - L)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html
    new file mode 100644
    index 0000000..3e02ce7
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - M)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html
    new file mode 100644
    index 0000000..89af93f
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - N)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html
    new file mode 100644
    index 0000000..417b5cd
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - O)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
    diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html
    new file mode 100644
    index 0000000..27bb76a
    --- /dev/null
    +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html
    @@ -0,0 +1,4 @@
    +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - P)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html new file mode 100644 index 0000000..2a83347 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html @@ -0,0 +1,4 @@ +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - R)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html new file mode 100644 index 0000000..cb40582 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html @@ -0,0 +1,4 @@ +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - S)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html new file mode 100644 index 0000000..ac03d34 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html @@ -0,0 +1,4 @@ +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - T)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html new file mode 100644 index 0000000..c48aaf7 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html @@ -0,0 +1,4 @@ +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - U)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html new file mode 100644 index 0000000..1f9d2fe --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html @@ -0,0 +1,4 @@ +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - V)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html new file mode 100644 index 0000000..a016a72 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html @@ -0,0 +1,4 @@ +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/frames.html b/docs/haddock/tensorflow-proto-0.1.0.0/frames.html new file mode 100644 index 0000000..1b4e38d --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/frames.html @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-proto-0.1.0.0/haddock-util.js new file mode 100644 index 0000000..9a6fccf --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/haddock-util.js @@ -0,0 +1,344 @@ +// Haddock JavaScript utilities + +var rspace = /\s\s+/g, + rtrim = /^\s+|\s+$/g; + +function spaced(s) { return (" " + s + " ").replace(rspace, " "); } +function trim(s) { return s.replace(rtrim, ""); } + +function hasClass(elem, value) { + var className = spaced(elem.className || ""); + return className.indexOf( " " + value + " " ) >= 0; +} + +function addClass(elem, value) { + var className = spaced(elem.className || ""); + if ( className.indexOf( " " + value + " " ) < 0 ) { + elem.className = trim(className + " " + value); + } +} + +function removeClass(elem, value) { + var className = spaced(elem.className || ""); + className = className.replace(" " + value + " ", " "); + elem.className = trim(className); +} + +function toggleClass(elem, valueOn, valueOff, bool) { + if (bool == null) { bool = ! hasClass(elem, valueOn); } + if (bool) { + removeClass(elem, valueOff); + addClass(elem, valueOn); + } + else { + removeClass(elem, valueOn); + addClass(elem, valueOff); + } + return bool; +} + + +function makeClassToggle(valueOn, valueOff) +{ + return function(elem, bool) { + return toggleClass(elem, valueOn, valueOff, bool); + } +} + +toggleShow = makeClassToggle("show", "hide"); +toggleCollapser = makeClassToggle("collapser", "expander"); + +function toggleSection(id) +{ + var b = toggleShow(document.getElementById("section." + id)); + toggleCollapser(document.getElementById("control." + id), b); + rememberCollapsed(id, b); + return b; +} + +var collapsed = {}; +function rememberCollapsed(id, b) +{ + if(b) + delete collapsed[id] + else + collapsed[id] = null; + + var sections = []; + for(var i in collapsed) + { + if(collapsed.hasOwnProperty(i)) + sections.push(i); + } + // cookie specific to this page; don't use setCookie which sets path=/ + document.cookie = "collapsed=" + escape(sections.join('+')); +} + +function restoreCollapsed() +{ + var cookie = getCookie("collapsed"); + if(!cookie) + return; + + var ids = cookie.split('+'); + for(var i in ids) + { + if(document.getElementById("section." 
+ ids[i])) + toggleSection(ids[i]); + } +} + +function setCookie(name, value) { + document.cookie = name + "=" + escape(value) + ";path=/;"; +} + +function clearCookie(name) { + document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; +} + +function getCookie(name) { + var nameEQ = name + "="; + var ca = document.cookie.split(';'); + for(var i=0;i < ca.length;i++) { + var c = ca[i]; + while (c.charAt(0)==' ') c = c.substring(1,c.length); + if (c.indexOf(nameEQ) == 0) { + return unescape(c.substring(nameEQ.length,c.length)); + } + } + return null; +} + + + +var max_results = 75; // 50 is not enough to search for map in the base libraries +var shown_range = null; +var last_search = null; + +function quick_search() +{ + perform_search(false); +} + +function full_search() +{ + perform_search(true); +} + + +function perform_search(full) +{ + var text = document.getElementById("searchbox").value.toLowerCase(); + if (text == last_search && !full) return; + last_search = text; + + var table = document.getElementById("indexlist"); + var status = document.getElementById("searchmsg"); + var children = table.firstChild.childNodes; + + // first figure out the first node with the prefix + var first = bisect(-1); + var last = (first == -1 ? -1 : bisect(1)); + + if (first == -1) + { + table.className = ""; + status.innerHTML = "No results found, displaying all"; + } + else if (first == 0 && last == children.length - 1) + { + table.className = ""; + status.innerHTML = ""; + } + else if (last - first >= max_results && !full) + { + table.className = ""; + status.innerHTML = "More than " + max_results + ", press Search to display"; + } + else + { + // decide what you need to clear/show + if (shown_range) + setclass(shown_range[0], shown_range[1], "indexrow"); + setclass(first, last, "indexshow"); + shown_range = [first, last]; + table.className = "indexsearch"; + status.innerHTML = ""; + } + + + function setclass(first, last, status) + { + for (var i = first; i <= last; i++) + { + children[i].className = status; + } + } + + + // do a binary search, treating 0 as ... + // return either -1 (no 0's found) or location of most far match + function bisect(dir) + { + var first = 0, finish = children.length - 1; + var mid, success = false; + + while (finish - first > 3) + { + mid = Math.floor((finish + first) / 2); + + var i = checkitem(mid); + if (i == 0) i = dir; + if (i == -1) + finish = mid; + else + first = mid; + } + var a = (dir == 1 ? first : finish); + var b = (dir == 1 ? finish : first); + for (var i = b; i != a - dir; i -= dir) + { + if (checkitem(i) == 0) return i; + } + return -1; + } + + + // from an index, decide what the result is + // 0 = match, -1 is lower, 1 is higher + function checkitem(i) + { + var s = getitem(i).toLowerCase().substr(0, text.length); + if (s == text) return 0; + else return (s > text ? -1 : 1); + } + + + // from an index, get its string + // this abstracts over alternates + function getitem(i) + { + for ( ; i >= 0; i--) + { + var s = children[i].firstChild.firstChild.data; + if (s.indexOf(' ') == -1) + return s; + } + return ""; // should never be reached + } +} + +function setSynopsis(filename) { + if (parent.window.synopsis) { + if (parent.window.synopsis.location.replace) { + // In Firefox this avoids adding the change to the history. 
+      parent.window.synopsis.location.replace(filename);
+    } else {
+      parent.window.synopsis.location = filename;
+    }
+  }
+}
+
+function addMenuItem(html) {
+  var menu = document.getElementById("page-menu");
+  if (menu) {
+    var btn = menu.firstChild.cloneNode(false);
+    btn.innerHTML = html;
+    menu.appendChild(btn);
+  }
+}
+
+function adjustForFrames() {
+  var bodyCls;
+
+  if (parent.location.href == window.location.href) {
+    // not in frames, so add Frames button
+    addMenuItem("<a href='#' onclick='reframe(); return true;'>Frames</a>");
+    bodyCls = "no-frame";
+  }
+  else {
+    bodyCls = "in-frame";
+  }
+  addClass(document.body, bodyCls);
+}
+
+function reframe() {
+  setCookie("haddock-reframe", document.URL);
+  window.location = "frames.html";
+}
+
+function postReframe() {
+  var s = getCookie("haddock-reframe");
+  if (s) {
+    parent.window.main.location = s;
+    clearCookie("haddock-reframe");
+  }
+}
+
+function styles() {
+  var i, a, es = document.getElementsByTagName("link"), rs = [];
+  for (i = 0; a = es[i]; i++) {
+    if(a.rel.indexOf("style") != -1 && a.title) {
+      rs.push(a);
+    }
+  }
+  return rs;
+}
+
+function addStyleMenu() {
+  var as = styles();
+  var i, a, btns = "";
+  for(i=0; a = as[i]; i++) {
+    btns += "<li><a href='#' "
+         + "onclick=\"setActiveStyleSheet('" + a.title + "'); return false;\">"
+         + a.title + "</a></li>";
+  }
+  if (as.length > 1) {
+    var h = "<div id='style-menu-holder'>"
+         + "<a href='#' onclick='styleMenu(); return false;'>Style &#9662;</a>"
+         + "<ul id='style-menu' class='hide'>" + btns + "</ul>"
+         + "</div>
    "; + addMenuItem(h); + } +} + +function setActiveStyleSheet(title) { + var as = styles(); + var i, a, found; + for(i=0; a = as[i]; i++) { + a.disabled = true; + // need to do this always, some browsers are edge triggered + if(a.title == title) { + found = a; + } + } + if (found) { + found.disabled = false; + setCookie("haddock-style", title); + } + else { + as[0].disabled = false; + clearCookie("haddock-style"); + } + styleMenu(false); +} + +function resetStyle() { + var s = getCookie("haddock-style"); + if (s) setActiveStyleSheet(s); +} + + +function styleMenu(show) { + var m = document.getElementById('style-menu'); + if (m) toggleShow(m, show); +} + + +function pageLoad() { + addStyleMenu(); + adjustForFrames(); + resetStyle(); + restoreCollapsed(); +} + diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-proto-0.1.0.0/hslogo-16.png new file mode 100644 index 0000000000000000000000000000000000000000..0ff8579fbd897417b0d6dad6e920f8882138a7c0 GIT binary patch literal 1684 zcmV;F25b3=P)4Tx0C)j~RL^S@K@|QrZmG~B2wH0nvUrdpNm;9CMbtL^5n^i$+aIn^?(HA4aZWV5ov6ELTdbo0FI&wK{O>*+w4vx20?>!`FrQsdJlnHR>OPy zcd~b_n$otK2Za4V;76L-DzNVtaSB-y0*E}{p()372;bw_^6ZZ}PI-92wGS&j#91PI zKs7DSe@(bk%_Y-7gGe}(^>I=@oY#w#*Bu9GZf3^F5WP>3rn}7Ut74&?PWBFvy`A)a zPP5)V!Xd&78LdA?xQ(9mjMYElVd13a#D+Z_7&Y|xU=_C-srWU*6kiZcC!$nw*)9$7 zn6CX+@=AhmkT}X@VSsa5NKe;HZuq)~1$`#h6R+ZTR#D-3j}vF!)ZOnz+5)dI4jl{{ z44Mr{P!L4~VVJN`K!!XTF*LGrKO?IK8z<8w`3e3jI8lUGNUta*C8 zn(P`s>{pjD=7Kek#B;Fw@hxAK%$F&Q6vg9J^Xf~4by_hu-=A!MJ3Znq&n~srbFGPs zH&&aMXZ>nO`|hf|ljc?VPhR!${AbO?W8x_>CU%PFA&Hm8F7cAsOREdwU~R_;ot1_u z(ruCYB-LPGn!NQdT|ZlRy+(fw^-+`=%+gee_kY4FWHg<*4sZI8+sFJD270UUORdLHO0nA4V) z%{fwsET5CQ>B?eK%uw4yQc~9?*JVo2}ze(;aRcp*ceL#HUJSllrgm5wQKR zQu+C;QrUh^8rFfA`ftFz{YAidi-`aL010qNS#tmY4c7nw4c7reD4Tcy00T@(L_t(I z5sj2vNEA^R$7gqDc6T=2^@fUA2(c`MltuL5<|KW>RWz$&YbU@|M|{$E*8Tu-Ux!w z1Y*Dr&Ubfr&v-nZaaB{3ilRumrjPmk{sZvQEWlW+{o~IH|8)=s6c#X9S5s5d%J z4@)&QH5|xQY-)^L1n0pTRu0Lx9`08YTjTwn^6 z0;b1+aQ@)n;Em$q;=7BBi)v0zj&o^g>0Whp^_^5IbxIUP8C@y9;R?*Ouu}rmfxbU= zwtWVNke-m!=`7bYEhWpcI5#)9qp`8E0lr6IQ)ARL3Ui}Af@grj8aN1=r>Cb+prlzO zNfJs*N_tUm2ZL%5* zPmL2??da$TR904gL(VDAQ-Fv_Dk}Pdw*4T(%*f4MKLRg=4ekMjhe2mW zMFsBwg%ftWT}0kxRaIk1k7qJ8*#cKB;Ft{i`zVIs-Nqge;!!Ld7#O&Qqu7e0sJmP) z$MW*>L$vSB&dxp@iA3U9fo)-7!Czlr{|o7Hv{1oyg3xsu%gn@(b1>$;SM-ZaQ`HV=V0s;lr%d8bd;xY zGwNvm3=Iu=tyXIgtJnf@A(2S@M140N ew{UA~tMxaJq;$xaSSi*30000tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/index.html b/docs/haddock/tensorflow-proto-0.1.0.0/index.html new file mode 100644 index 0000000..fb07a5b --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/index.html @@ -0,0 +1,4 @@ +tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AttrValue.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AttrValue.html new file mode 100644 index 0000000..a05d215 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-AttrValue.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.AttrValue

    Proto.Tensorflow.Core.Framework.AttrValue

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Graph.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Graph.html new file mode 100644 index 0000000..14f4318 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Graph.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.Graph

    Proto.Tensorflow.Core.Framework.Graph

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-NodeDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-NodeDef.html new file mode 100644 index 0000000..7d65c77 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-NodeDef.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.NodeDef

    Proto.Tensorflow.Core.Framework.NodeDef

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-OpDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-OpDef.html new file mode 100644 index 0000000..2cd795c --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-OpDef.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.OpDef

    Proto.Tensorflow.Core.Framework.OpDef

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-ResourceHandle.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-ResourceHandle.html new file mode 100644 index 0000000..521fe5c --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-ResourceHandle.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.ResourceHandle

    Proto.Tensorflow.Core.Framework.ResourceHandle

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Tensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Tensor.html new file mode 100644 index 0000000..275f22f --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Tensor.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.Tensor

    Proto.Tensorflow.Core.Framework.Tensor

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorShape.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorShape.html new file mode 100644 index 0000000..29f6d63 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-TensorShape.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.TensorShape

    Proto.Tensorflow.Core.Framework.TensorShape

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Types.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Types.html new file mode 100644 index 0000000..ab4e0a2 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Types.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Framework.Types

    Proto.Tensorflow.Core.Framework.Types

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html new file mode 100644 index 0000000..6854d28 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Protobuf.Config

    Proto.Tensorflow.Core.Protobuf.Config

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/minus.gif b/docs/haddock/tensorflow-proto-0.1.0.0/minus.gif new file mode 100644 index 0000000000000000000000000000000000000000..1deac2fe1a42e35b994f1b855488f392c50f6a89 GIT binary patch literal 56 zcmZ?wbhEHb * { + font-size: 93%; /* 12pt */ +} + +#mini #module-list .caption, +#mini #module-header .caption { + font-size: 125%; /* 15pt */ +} + +#mini #interface h1, +#mini #interface h2, +#mini #interface h3, +#mini #interface h4 { + font-size: 109%; /* 13pt */ + margin: 1em 0 0; +} + +#mini #interface .top, +#mini #interface .src { + margin: 0; +} + +#mini #module-list ul { + list-style: none; + margin: 0; +} + +#alphabet ul { + list-style: none; + padding: 0; + margin: 0.5em 0 0; + text-align: center; +} + +#alphabet li { + display: inline; + margin: 0 0.25em; +} + +#alphabet a { + font-weight: bold; +} + +#index .caption, +#module-list .caption { font-size: 131%; /* 17pt */ } + +#index table { + margin-left: 2em; +} + +#index .src { + font-weight: bold; +} +#index .alt { + font-size: 77%; /* 10pt */ + font-style: italic; + padding-left: 2em; +} + +#index td + td { + padding-left: 1em; +} + +#module-list ul { + list-style: none; + margin: 0 0 0 2em; +} + +#module-list li { + clear: right; +} + +#module-list span.collapser, +#module-list span.expander { + background-position: 0 0.3em; +} + +#module-list .package { + float: right; +} + +/* @end */ diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/plus.gif b/docs/haddock/tensorflow-proto-0.1.0.0/plus.gif new file mode 100644 index 0000000000000000000000000000000000000000..2d15c14173d23f664b955cd24f51c82f5f09d91d GIT binary patch literal 59 zcmZ?wbhEHbgbBX M^XE!9f*2UA0nx1yDgXcg literal 0 HcmV?d00001 diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AllocationDescription.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AllocationDescription.html new file mode 100644 index 0000000..403c9e4 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AllocationDescription.html @@ -0,0 +1,233 @@ + + + + + +.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/AllocationDescription.hs + + + +
    {- This file was auto-generated from tensorflow/core/framework/allocation_description.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.AllocationDescription where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +
    +data AllocationDescription = AllocationDescription{_AllocationDescription'requestedBytes
    +                                                   :: Data.Int.Int64,
    +                                                   _AllocationDescription'allocatedBytes ::
    +                                                   Data.Int.Int64,
    +                                                   _AllocationDescription'allocatorName ::
    +                                                   Data.Text.Text,
    +                                                   _AllocationDescription'allocationId ::
    +                                                   Data.Int.Int64,
    +                                                   _AllocationDescription'hasSingleReference ::
    +                                                   Prelude.Bool,
    +                                                   _AllocationDescription'ptr :: Data.Word.Word64}
    +                           deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance
    +     Data.ProtoLens.Field "requestedBytes" AllocationDescription =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "requestedBytes"
    +         AllocationDescription AllocationDescription where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AllocationDescription'requestedBytes
    +              (\ x__ y__ -> x__{_AllocationDescription'requestedBytes = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "allocatedBytes" AllocationDescription =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "allocatedBytes"
    +         AllocationDescription AllocationDescription where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AllocationDescription'allocatedBytes
    +              (\ x__ y__ -> x__{_AllocationDescription'allocatedBytes = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "allocatorName" AllocationDescription =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "allocatorName"
    +         AllocationDescription AllocationDescription where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AllocationDescription'allocatorName
    +              (\ x__ y__ -> x__{_AllocationDescription'allocatorName = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "allocationId" AllocationDescription =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "allocationId"
    +         AllocationDescription AllocationDescription where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AllocationDescription'allocationId
    +              (\ x__ y__ -> x__{_AllocationDescription'allocationId = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "hasSingleReference" AllocationDescription =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "hasSingleReference"
    +         AllocationDescription AllocationDescription where
    +        field _
    +          = Lens.Family2.Unchecked.lens
    +              _AllocationDescription'hasSingleReference
    +              (\ x__ y__ -> x__{_AllocationDescription'hasSingleReference = y__})
    +
    +type instance Data.ProtoLens.Field "ptr" AllocationDescription =
    +     Data.Word.Word64
    +
    +instance Data.ProtoLens.HasField "ptr" AllocationDescription
    +         AllocationDescription where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AllocationDescription'ptr
    +              (\ x__ y__ -> x__{_AllocationDescription'ptr = y__})
    +
    +instance Data.Default.Class.Default AllocationDescription where
    +        def
    +          = AllocationDescription{_AllocationDescription'requestedBytes =
    +                                    Data.ProtoLens.fieldDefault,
    +                                  _AllocationDescription'allocatedBytes =
    +                                    Data.ProtoLens.fieldDefault,
    +                                  _AllocationDescription'allocatorName =
    +                                    Data.ProtoLens.fieldDefault,
    +                                  _AllocationDescription'allocationId = Data.ProtoLens.fieldDefault,
    +                                  _AllocationDescription'hasSingleReference =
    +                                    Data.ProtoLens.fieldDefault,
    +                                  _AllocationDescription'ptr = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message AllocationDescription where
    +        descriptor
    +          = let requestedBytes__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "requested_bytes"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional requestedBytes)
    +                allocatedBytes__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "allocated_bytes"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatedBytes)
    +                allocatorName__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "allocator_name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorName)
    +                allocationId__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "allocation_id"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocationId)
    +                hasSingleReference__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "has_single_reference"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         hasSingleReference)
    +                ptr__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "ptr"
    +                      (Data.ProtoLens.UInt64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional ptr)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, requestedBytes__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, allocatedBytes__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, allocatorName__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, allocationId__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, hasSingleReference__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, ptr__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("requested_bytes", requestedBytes__field_descriptor),
    +                    ("allocated_bytes", allocatedBytes__field_descriptor),
    +                    ("allocator_name", allocatorName__field_descriptor),
    +                    ("allocation_id", allocationId__field_descriptor),
    +                    ("has_single_reference", hasSingleReference__field_descriptor),
    +                    ("ptr", ptr__field_descriptor)])
    +
    +allocatedBytes ::
    +               forall msg msg' .
    +                 Data.ProtoLens.HasField "allocatedBytes" msg msg' =>
    +                 Lens.Family2.Lens msg msg'
    +                   (Data.ProtoLens.Field "allocatedBytes" msg)
    +                   (Data.ProtoLens.Field "allocatedBytes" msg')
    +allocatedBytes
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "allocatedBytes")
    +
    +allocationId ::
    +             forall msg msg' .
    +               Data.ProtoLens.HasField "allocationId" msg msg' =>
    +               Lens.Family2.Lens msg msg'
    +                 (Data.ProtoLens.Field "allocationId" msg)
    +                 (Data.ProtoLens.Field "allocationId" msg')
    +allocationId
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "allocationId")
    +
    +allocatorName ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "allocatorName" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "allocatorName" msg)
    +                  (Data.ProtoLens.Field "allocatorName" msg')
    +allocatorName
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "allocatorName")
    +
    +hasSingleReference ::
    +                   forall msg msg' .
    +                     Data.ProtoLens.HasField "hasSingleReference" msg msg' =>
    +                     Lens.Family2.Lens msg msg'
    +                       (Data.ProtoLens.Field "hasSingleReference" msg)
    +                       (Data.ProtoLens.Field "hasSingleReference" msg')
    +hasSingleReference
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "hasSingleReference")
    +
    +ptr ::
    +    forall msg msg' . Data.ProtoLens.HasField "ptr" msg msg' =>
    +      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "ptr" msg)
    +        (Data.ProtoLens.Field "ptr" msg')
    +ptr
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "ptr")
    +
    +requestedBytes ::
    +               forall msg msg' .
    +                 Data.ProtoLens.HasField "requestedBytes" msg msg' =>
    +                 Lens.Family2.Lens msg msg'
    +                   (Data.ProtoLens.Field "requestedBytes" msg)
    +                   (Data.ProtoLens.Field "requestedBytes" msg')
    +requestedBytes
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "requestedBytes")
    +
    + diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AttrValue.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AttrValue.html new file mode 100644 index 0000000..7cdd141 --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AttrValue.html @@ -0,0 +1,762 @@ + + + + + +.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/AttrValue.hs + + + +
    {- This file was auto-generated from tensorflow/core/framework/attr_value.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.AttrValue where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +import qualified Proto.Tensorflow.Core.Framework.Tensor
    +import qualified Proto.Tensorflow.Core.Framework.TensorShape
    +import qualified Proto.Tensorflow.Core.Framework.Types
    +
    +data AttrValue = AttrValue{_AttrValue's ::
    +                           Prelude.Maybe Data.ByteString.ByteString,
    +                           _AttrValue'i :: Prelude.Maybe Data.Int.Int64,
    +                           _AttrValue'f :: Prelude.Maybe Prelude.Float,
    +                           _AttrValue'b :: Prelude.Maybe Prelude.Bool,
    +                           _AttrValue'type' ::
    +                           Prelude.Maybe Proto.Tensorflow.Core.Framework.Types.DataType,
    +                           _AttrValue'shape ::
    +                           Prelude.Maybe
    +                             Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
    +                           _AttrValue'tensor ::
    +                           Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
    +                           _AttrValue'list :: Prelude.Maybe AttrValue'ListValue,
    +                           _AttrValue'func :: Prelude.Maybe NameAttrList,
    +                           _AttrValue'placeholder :: Prelude.Maybe Data.Text.Text}
    +               deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "s" AttrValue =
    +     Data.ByteString.ByteString
    +
    +instance Data.ProtoLens.HasField "s" AttrValue AttrValue where
    +        field _
    +          = (Prelude..) maybe's
    +              (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault)
    +
    +type instance Data.ProtoLens.Field "maybe's" AttrValue =
    +     Prelude.Maybe Data.ByteString.ByteString
    +
    +instance Data.ProtoLens.HasField "maybe's" AttrValue AttrValue
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue's
    +              (\ x__ y__ -> x__{_AttrValue's = y__})
    +
    +type instance Data.ProtoLens.Field "i" AttrValue = Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "i" AttrValue AttrValue where
    +        field _
    +          = (Prelude..) maybe'i
    +              (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault)
    +
    +type instance Data.ProtoLens.Field "maybe'i" AttrValue =
    +     Prelude.Maybe Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "maybe'i" AttrValue AttrValue
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'i
    +              (\ x__ y__ -> x__{_AttrValue'i = y__})
    +
    +type instance Data.ProtoLens.Field "f" AttrValue = Prelude.Float
    +
    +instance Data.ProtoLens.HasField "f" AttrValue AttrValue where
    +        field _
    +          = (Prelude..) maybe'f
    +              (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault)
    +
    +type instance Data.ProtoLens.Field "maybe'f" AttrValue =
    +     Prelude.Maybe Prelude.Float
    +
    +instance Data.ProtoLens.HasField "maybe'f" AttrValue AttrValue
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'f
    +              (\ x__ y__ -> x__{_AttrValue'f = y__})
    +
    +type instance Data.ProtoLens.Field "b" AttrValue = Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "b" AttrValue AttrValue where
    +        field _
    +          = (Prelude..) maybe'b
    +              (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault)
    +
    +type instance Data.ProtoLens.Field "maybe'b" AttrValue =
    +     Prelude.Maybe Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "maybe'b" AttrValue AttrValue
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'b
    +              (\ x__ y__ -> x__{_AttrValue'b = y__})
    +
    +type instance Data.ProtoLens.Field "type'" AttrValue =
    +     Proto.Tensorflow.Core.Framework.Types.DataType
    +
    +instance Data.ProtoLens.HasField "type'" AttrValue AttrValue where
    +        field _
    +          = (Prelude..) maybe'type'
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'type'" AttrValue =
    +     Prelude.Maybe Proto.Tensorflow.Core.Framework.Types.DataType
    +
    +instance Data.ProtoLens.HasField "maybe'type'" AttrValue AttrValue
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'type'
    +              (\ x__ y__ -> x__{_AttrValue'type' = y__})
    +
    +type instance Data.ProtoLens.Field "shape" AttrValue =
    +     Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    +
    +instance Data.ProtoLens.HasField "shape" AttrValue AttrValue where
    +        field _
    +          = (Prelude..) maybe'shape
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'shape" AttrValue =
    +     Prelude.Maybe
    +       Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    +
    +instance Data.ProtoLens.HasField "maybe'shape" AttrValue AttrValue
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'shape
    +              (\ x__ y__ -> x__{_AttrValue'shape = y__})
    +
    +type instance Data.ProtoLens.Field "tensor" AttrValue =
    +     Proto.Tensorflow.Core.Framework.Tensor.TensorProto
    +
    +instance Data.ProtoLens.HasField "tensor" AttrValue AttrValue where
    +        field _
    +          = (Prelude..) maybe'tensor
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'tensor" AttrValue =
    +     Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto
    +
    +instance Data.ProtoLens.HasField "maybe'tensor" AttrValue AttrValue
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'tensor
    +              (\ x__ y__ -> x__{_AttrValue'tensor = y__})
    +
    +type instance Data.ProtoLens.Field "list" AttrValue =
    +     AttrValue'ListValue
    +
    +instance Data.ProtoLens.HasField "list" AttrValue AttrValue where
    +        field _
    +          = (Prelude..) maybe'list
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'list" AttrValue =
    +     Prelude.Maybe AttrValue'ListValue
    +
    +instance Data.ProtoLens.HasField "maybe'list" AttrValue AttrValue
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'list
    +              (\ x__ y__ -> x__{_AttrValue'list = y__})
    +
    +type instance Data.ProtoLens.Field "func" AttrValue = NameAttrList
    +
    +instance Data.ProtoLens.HasField "func" AttrValue AttrValue where
    +        field _
    +          = (Prelude..) maybe'func
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'func" AttrValue =
    +     Prelude.Maybe NameAttrList
    +
    +instance Data.ProtoLens.HasField "maybe'func" AttrValue AttrValue
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'func
    +              (\ x__ y__ -> x__{_AttrValue'func = y__})
    +
    +type instance Data.ProtoLens.Field "placeholder" AttrValue =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "placeholder" AttrValue AttrValue
    +         where
    +        field _
    +          = (Prelude..) maybe'placeholder
    +              (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault)
    +
    +type instance Data.ProtoLens.Field "maybe'placeholder" AttrValue =
    +     Prelude.Maybe Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "maybe'placeholder" AttrValue
    +         AttrValue where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'placeholder
    +              (\ x__ y__ -> x__{_AttrValue'placeholder = y__})
    +
    +instance Data.Default.Class.Default AttrValue where
    +        def
    +          = AttrValue{_AttrValue's = Prelude.Nothing,
    +                      _AttrValue'i = Prelude.Nothing, _AttrValue'f = Prelude.Nothing,
    +                      _AttrValue'b = Prelude.Nothing, _AttrValue'type' = Prelude.Nothing,
    +                      _AttrValue'shape = Prelude.Nothing,
    +                      _AttrValue'tensor = Prelude.Nothing,
    +                      _AttrValue'list = Prelude.Nothing,
    +                      _AttrValue'func = Prelude.Nothing,
    +                      _AttrValue'placeholder = Prelude.Nothing}
    +
    +instance Data.ProtoLens.Message AttrValue where
    +        descriptor
    +          = let s__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "s"
    +                      (Data.ProtoLens.BytesField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
    +                      (Data.ProtoLens.OptionalField maybe's)
    +                i__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "i"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.OptionalField maybe'i)
    +                f__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "f"
    +                      (Data.ProtoLens.FloatField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
    +                      (Data.ProtoLens.OptionalField maybe'f)
    +                b__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "b"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.OptionalField maybe'b)
    +                type'__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "type"
    +                      (Data.ProtoLens.EnumField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Types.DataType)
    +                      (Data.ProtoLens.OptionalField maybe'type')
    +                shape__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "shape"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
    +                      (Data.ProtoLens.OptionalField maybe'shape)
    +                tensor__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "tensor"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Tensor.TensorProto)
    +                      (Data.ProtoLens.OptionalField maybe'tensor)
    +                list__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "list"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor AttrValue'ListValue)
    +                      (Data.ProtoLens.OptionalField maybe'list)
    +                func__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "func"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor NameAttrList)
    +                      (Data.ProtoLens.OptionalField maybe'func)
    +                placeholder__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "placeholder"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.OptionalField maybe'placeholder)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 2, s__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, i__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, f__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, b__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, type'__field_descriptor),
    +                    (Data.ProtoLens.Tag 7, shape__field_descriptor),
    +                    (Data.ProtoLens.Tag 8, tensor__field_descriptor),
    +                    (Data.ProtoLens.Tag 1, list__field_descriptor),
    +                    (Data.ProtoLens.Tag 10, func__field_descriptor),
    +                    (Data.ProtoLens.Tag 9, placeholder__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("s", s__field_descriptor), ("i", i__field_descriptor),
    +                    ("f", f__field_descriptor), ("b", b__field_descriptor),
    +                    ("type", type'__field_descriptor),
    +                    ("shape", shape__field_descriptor),
    +                    ("tensor", tensor__field_descriptor),
    +                    ("list", list__field_descriptor), ("func", func__field_descriptor),
    +                    ("placeholder", placeholder__field_descriptor)])
    +
    +data AttrValue'ListValue = AttrValue'ListValue{_AttrValue'ListValue's
    +                                               :: [Data.ByteString.ByteString],
    +                                               _AttrValue'ListValue'i :: [Data.Int.Int64],
    +                                               _AttrValue'ListValue'f :: [Prelude.Float],
    +                                               _AttrValue'ListValue'b :: [Prelude.Bool],
    +                                               _AttrValue'ListValue'type' ::
    +                                               [Proto.Tensorflow.Core.Framework.Types.DataType],
    +                                               _AttrValue'ListValue'shape ::
    +                                               [Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto],
    +                                               _AttrValue'ListValue'tensor ::
    +                                               [Proto.Tensorflow.Core.Framework.Tensor.TensorProto]}
    +                         deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "s" AttrValue'ListValue =
    +     [Data.ByteString.ByteString]
    +
    +instance Data.ProtoLens.HasField "s" AttrValue'ListValue
    +         AttrValue'ListValue where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'ListValue's
    +              (\ x__ y__ -> x__{_AttrValue'ListValue's = y__})
    +
    +type instance Data.ProtoLens.Field "i" AttrValue'ListValue =
    +     [Data.Int.Int64]
    +
    +instance Data.ProtoLens.HasField "i" AttrValue'ListValue
    +         AttrValue'ListValue where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'i
    +              (\ x__ y__ -> x__{_AttrValue'ListValue'i = y__})
    +
    +type instance Data.ProtoLens.Field "f" AttrValue'ListValue =
    +     [Prelude.Float]
    +
    +instance Data.ProtoLens.HasField "f" AttrValue'ListValue
    +         AttrValue'ListValue where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'f
    +              (\ x__ y__ -> x__{_AttrValue'ListValue'f = y__})
    +
    +type instance Data.ProtoLens.Field "b" AttrValue'ListValue =
    +     [Prelude.Bool]
    +
    +instance Data.ProtoLens.HasField "b" AttrValue'ListValue
    +         AttrValue'ListValue where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'b
    +              (\ x__ y__ -> x__{_AttrValue'ListValue'b = y__})
    +
    +type instance Data.ProtoLens.Field "type'" AttrValue'ListValue =
    +     [Proto.Tensorflow.Core.Framework.Types.DataType]
    +
    +instance Data.ProtoLens.HasField "type'" AttrValue'ListValue
    +         AttrValue'ListValue where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'type'
    +              (\ x__ y__ -> x__{_AttrValue'ListValue'type' = y__})
    +
    +type instance Data.ProtoLens.Field "shape" AttrValue'ListValue =
    +     [Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto]
    +
    +instance Data.ProtoLens.HasField "shape" AttrValue'ListValue
    +         AttrValue'ListValue where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'shape
    +              (\ x__ y__ -> x__{_AttrValue'ListValue'shape = y__})
    +
    +type instance Data.ProtoLens.Field "tensor" AttrValue'ListValue =
    +     [Proto.Tensorflow.Core.Framework.Tensor.TensorProto]
    +
    +instance Data.ProtoLens.HasField "tensor" AttrValue'ListValue
    +         AttrValue'ListValue where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'tensor
    +              (\ x__ y__ -> x__{_AttrValue'ListValue'tensor = y__})
    +
    +instance Data.Default.Class.Default AttrValue'ListValue where
    +        def
    +          = AttrValue'ListValue{_AttrValue'ListValue's = [],
    +                                _AttrValue'ListValue'i = [], _AttrValue'ListValue'f = [],
    +                                _AttrValue'ListValue'b = [], _AttrValue'ListValue'type' = [],
    +                                _AttrValue'ListValue'shape = [], _AttrValue'ListValue'tensor = []}
    +
    +instance Data.ProtoLens.Message AttrValue'ListValue where
    +        descriptor
    +          = let s__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "s"
    +                      (Data.ProtoLens.BytesField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked s)
    +                i__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "i"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed i)
    +                f__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "f"
    +                      (Data.ProtoLens.FloatField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed f)
    +                b__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "b"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed b)
    +                type'__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "type"
    +                      (Data.ProtoLens.EnumField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Types.DataType)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed type')
    +                shape__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "shape"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked shape)
    +                tensor__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "tensor"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Tensor.TensorProto)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked tensor)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 2, s__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, i__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, f__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, b__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, type'__field_descriptor),
    +                    (Data.ProtoLens.Tag 7, shape__field_descriptor),
    +                    (Data.ProtoLens.Tag 8, tensor__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("s", s__field_descriptor), ("i", i__field_descriptor),
    +                    ("f", f__field_descriptor), ("b", b__field_descriptor),
    +                    ("type", type'__field_descriptor),
    +                    ("shape", shape__field_descriptor),
    +                    ("tensor", tensor__field_descriptor)])
    +
    +data NameAttrList = NameAttrList{_NameAttrList'name ::
    +                                 Data.Text.Text,
    +                                 _NameAttrList'attr :: Data.Map.Map Data.Text.Text AttrValue}
    +                  deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "name" NameAttrList =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "name" NameAttrList NameAttrList
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NameAttrList'name
    +              (\ x__ y__ -> x__{_NameAttrList'name = y__})
    +
    +type instance Data.ProtoLens.Field "attr" NameAttrList =
    +     Data.Map.Map Data.Text.Text AttrValue
    +
    +instance Data.ProtoLens.HasField "attr" NameAttrList NameAttrList
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NameAttrList'attr
    +              (\ x__ y__ -> x__{_NameAttrList'attr = y__})
    +
    +instance Data.Default.Class.Default NameAttrList where
    +        def
    +          = NameAttrList{_NameAttrList'name = Data.ProtoLens.fieldDefault,
    +                         _NameAttrList'attr = Data.Map.empty}
    +
    +instance Data.ProtoLens.Message NameAttrList where
    +        descriptor
    +          = let name__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    +                attr__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "attr"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor NameAttrList'AttrEntry)
    +                      (Data.ProtoLens.MapField key value attr)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, attr__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("name", name__field_descriptor),
    +                    ("attr", attr__field_descriptor)])
    +
    +data NameAttrList'AttrEntry = NameAttrList'AttrEntry{_NameAttrList'AttrEntry'key
    +                                                     :: Data.Text.Text,
    +                                                     _NameAttrList'AttrEntry'value ::
    +                                                     Prelude.Maybe AttrValue}
    +                            deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "key" NameAttrList'AttrEntry =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "key" NameAttrList'AttrEntry
    +         NameAttrList'AttrEntry where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NameAttrList'AttrEntry'key
    +              (\ x__ y__ -> x__{_NameAttrList'AttrEntry'key = y__})
    +
    +type instance Data.ProtoLens.Field "value" NameAttrList'AttrEntry =
    +     AttrValue
    +
    +instance Data.ProtoLens.HasField "value" NameAttrList'AttrEntry
    +         NameAttrList'AttrEntry where
    +        field _
    +          = (Prelude..) maybe'value
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance
    +     Data.ProtoLens.Field "maybe'value" NameAttrList'AttrEntry =
    +     Prelude.Maybe AttrValue
    +
    +instance Data.ProtoLens.HasField "maybe'value"
    +         NameAttrList'AttrEntry NameAttrList'AttrEntry where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NameAttrList'AttrEntry'value
    +              (\ x__ y__ -> x__{_NameAttrList'AttrEntry'value = y__})
    +
    +instance Data.Default.Class.Default NameAttrList'AttrEntry where
    +        def
    +          = NameAttrList'AttrEntry{_NameAttrList'AttrEntry'key =
    +                                     Data.ProtoLens.fieldDefault,
    +                                   _NameAttrList'AttrEntry'value = Prelude.Nothing}
    +
    +instance Data.ProtoLens.Message NameAttrList'AttrEntry where
    +        descriptor
    +          = let key__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "key"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    +                value__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "value"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor AttrValue)
    +                      (Data.ProtoLens.OptionalField maybe'value)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("key", key__field_descriptor),
    +                    ("value", value__field_descriptor)])
    +
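A minimal usage sketch (illustrative, not part of the generated module): the NameAttrList'AttrEntry type above is only the protobuf wire representation of the map field; client code reads and writes the whole map through the "attr" lens defined below, assuming the lens-family (Lens.Family2.view) and Data.Map APIs that this module re-exports.

    import qualified Data.Map as Map
    import qualified Lens.Family2 as Lens

    -- Count the attributes of a NameAttrList via the generated "attr" lens;
    -- the AttrEntry values never surface in user code, only the Map does.
    attrCount :: NameAttrList -> Int
    attrCount nal = Map.size (Lens.view attr nal)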
    +attr ::
    +     forall msg msg' . Data.ProtoLens.HasField "attr" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "attr" msg)
    +         (Data.ProtoLens.Field "attr" msg')
    +attr
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "attr")
    +
    +b ::
    +  forall msg msg' . Data.ProtoLens.HasField "b" msg msg' =>
    +    Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "b" msg)
    +      (Data.ProtoLens.Field "b" msg')
    +b = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "b")
    +
    +f ::
    +  forall msg msg' . Data.ProtoLens.HasField "f" msg msg' =>
    +    Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "f" msg)
    +      (Data.ProtoLens.Field "f" msg')
    +f = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "f")
    +
    +func ::
    +     forall msg msg' . Data.ProtoLens.HasField "func" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "func" msg)
    +         (Data.ProtoLens.Field "func" msg')
    +func
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "func")
    +
    +i ::
    +  forall msg msg' . Data.ProtoLens.HasField "i" msg msg' =>
    +    Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "i" msg)
    +      (Data.ProtoLens.Field "i" msg')
    +i = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "i")
    +
    +key ::
    +    forall msg msg' . Data.ProtoLens.HasField "key" msg msg' =>
    +      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "key" msg)
    +        (Data.ProtoLens.Field "key" msg')
    +key
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "key")
    +
    +list ::
    +     forall msg msg' . Data.ProtoLens.HasField "list" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "list" msg)
    +         (Data.ProtoLens.Field "list" msg')
    +list
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "list")
    +
    +maybe'b ::
    +        forall msg msg' . Data.ProtoLens.HasField "maybe'b" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'b" msg)
    +            (Data.ProtoLens.Field "maybe'b" msg')
    +maybe'b
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'b")
    +
    +maybe'f ::
    +        forall msg msg' . Data.ProtoLens.HasField "maybe'f" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'f" msg)
    +            (Data.ProtoLens.Field "maybe'f" msg')
    +maybe'f
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'f")
    +
    +maybe'func ::
    +           forall msg msg' . Data.ProtoLens.HasField "maybe'func" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'func" msg)
    +               (Data.ProtoLens.Field "maybe'func" msg')
    +maybe'func
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'func")
    +
    +maybe'i ::
    +        forall msg msg' . Data.ProtoLens.HasField "maybe'i" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'i" msg)
    +            (Data.ProtoLens.Field "maybe'i" msg')
    +maybe'i
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'i")
    +
    +maybe'list ::
    +           forall msg msg' . Data.ProtoLens.HasField "maybe'list" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'list" msg)
    +               (Data.ProtoLens.Field "maybe'list" msg')
    +maybe'list
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'list")
    +
    +maybe'placeholder ::
    +                  forall msg msg' .
    +                    Data.ProtoLens.HasField "maybe'placeholder" msg msg' =>
    +                    Lens.Family2.Lens msg msg'
    +                      (Data.ProtoLens.Field "maybe'placeholder" msg)
    +                      (Data.ProtoLens.Field "maybe'placeholder" msg')
    +maybe'placeholder
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'placeholder")
    +
    +maybe's ::
    +        forall msg msg' . Data.ProtoLens.HasField "maybe's" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe's" msg)
    +            (Data.ProtoLens.Field "maybe's" msg')
    +maybe's
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe's")
    +
    +maybe'shape ::
    +            forall msg msg' . Data.ProtoLens.HasField "maybe'shape" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'shape" msg)
    +                (Data.ProtoLens.Field "maybe'shape" msg')
    +maybe'shape
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'shape")
    +
    +maybe'tensor ::
    +             forall msg msg' .
    +               Data.ProtoLens.HasField "maybe'tensor" msg msg' =>
    +               Lens.Family2.Lens msg msg'
    +                 (Data.ProtoLens.Field "maybe'tensor" msg)
    +                 (Data.ProtoLens.Field "maybe'tensor" msg')
    +maybe'tensor
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'tensor")
    +
    +maybe'type' ::
    +            forall msg msg' . Data.ProtoLens.HasField "maybe'type'" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'type'" msg)
    +                (Data.ProtoLens.Field "maybe'type'" msg')
    +maybe'type'
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'type'")
    +
    +maybe'value ::
    +            forall msg msg' . Data.ProtoLens.HasField "maybe'value" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'value" msg)
    +                (Data.ProtoLens.Field "maybe'value" msg')
    +maybe'value
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'value")
    +
    +name ::
    +     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    +         (Data.ProtoLens.Field "name" msg')
    +name
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    +
    +placeholder ::
    +            forall msg msg' . Data.ProtoLens.HasField "placeholder" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "placeholder" msg)
    +                (Data.ProtoLens.Field "placeholder" msg')
    +placeholder
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "placeholder")
    +
    +s ::
    +  forall msg msg' . Data.ProtoLens.HasField "s" msg msg' =>
    +    Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "s" msg)
    +      (Data.ProtoLens.Field "s" msg')
    +s = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "s")
    +
    +shape ::
    +      forall msg msg' . Data.ProtoLens.HasField "shape" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "shape" msg)
    +          (Data.ProtoLens.Field "shape" msg')
    +shape
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "shape")
    +
    +tensor ::
    +       forall msg msg' . Data.ProtoLens.HasField "tensor" msg msg' =>
    +         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "tensor" msg)
    +           (Data.ProtoLens.Field "tensor" msg')
    +tensor
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "tensor")
    +
    +type' ::
    +      forall msg msg' . Data.ProtoLens.HasField "type'" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "type'" msg)
    +          (Data.ProtoLens.Field "type'" msg')
    +type'
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "type'")
    +
    +value ::
    +      forall msg msg' . Data.ProtoLens.HasField "value" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "value" msg)
    +          (Data.ProtoLens.Field "value" msg')
    +value
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "value")
    +
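The accessors above are ordinary lens-family lenses dispatched through the HasField class, so any of these messages can be built from its Default instance and updated with set. A minimal sketch (illustrative, not part of the patch), assuming the Lens.Family2 set/view functions and the data-default-class def that this module re-exports; the Int64 payload is an arbitrary example value.

    import qualified Data.Default.Class as Default
    import qualified Lens.Family2 as Lens

    -- Build an AttrValue carrying an integer, starting from the
    -- all-defaults message and setting the "i" field.
    intAttr :: AttrValue
    intAttr = Lens.set i 42 Default.def

    -- Reading it back: Lens.view i intAttr == 42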
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-CostGraph.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-CostGraph.html
new file mode 100644
index 0000000..4fd4bae
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-CostGraph.html
@@ -0,0 +1,570 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/CostGraph.hs
+{- This file was auto-generated from tensorflow/core/framework/cost_graph.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.CostGraph where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +import qualified Proto.Tensorflow.Core.Framework.TensorShape
    +import qualified Proto.Tensorflow.Core.Framework.Types
    +
    +data CostGraphDef = CostGraphDef{_CostGraphDef'node ::
    +                                 [CostGraphDef'Node]}
    +                  deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "node" CostGraphDef =
    +     [CostGraphDef'Node]
    +
    +instance Data.ProtoLens.HasField "node" CostGraphDef CostGraphDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'node
    +              (\ x__ y__ -> x__{_CostGraphDef'node = y__})
    +
    +instance Data.Default.Class.Default CostGraphDef where
    +        def = CostGraphDef{_CostGraphDef'node = []}
    +
    +instance Data.ProtoLens.Message CostGraphDef where
    +        descriptor
    +          = let node__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "node"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor CostGraphDef'Node)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked node)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, node__field_descriptor)])
    +                (Data.Map.fromList [("node", node__field_descriptor)])
    +
    +data CostGraphDef'Node = CostGraphDef'Node{_CostGraphDef'Node'name
    +                                           :: Data.Text.Text,
    +                                           _CostGraphDef'Node'device :: Data.Text.Text,
    +                                           _CostGraphDef'Node'id :: Data.Int.Int32,
    +                                           _CostGraphDef'Node'inputInfo ::
    +                                           [CostGraphDef'Node'InputInfo],
    +                                           _CostGraphDef'Node'outputInfo ::
    +                                           [CostGraphDef'Node'OutputInfo],
    +                                           _CostGraphDef'Node'temporaryMemorySize :: Data.Int.Int64,
    +                                           _CostGraphDef'Node'computeCost :: Data.Int.Int64,
    +                                           _CostGraphDef'Node'isFinal :: Prelude.Bool,
    +                                           _CostGraphDef'Node'controlInput :: [Data.Int.Int32]}
    +                       deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "name" CostGraphDef'Node =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "name" CostGraphDef'Node
    +         CostGraphDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'name
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'name = y__})
    +
    +type instance Data.ProtoLens.Field "device" CostGraphDef'Node =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "device" CostGraphDef'Node
    +         CostGraphDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'device
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'device = y__})
    +
    +type instance Data.ProtoLens.Field "id" CostGraphDef'Node =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "id" CostGraphDef'Node
    +         CostGraphDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'id
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'id = y__})
    +
    +type instance Data.ProtoLens.Field "inputInfo" CostGraphDef'Node =
    +     [CostGraphDef'Node'InputInfo]
    +
    +instance Data.ProtoLens.HasField "inputInfo" CostGraphDef'Node
    +         CostGraphDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'inputInfo
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'inputInfo = y__})
    +
    +type instance Data.ProtoLens.Field "outputInfo" CostGraphDef'Node =
    +     [CostGraphDef'Node'OutputInfo]
    +
    +instance Data.ProtoLens.HasField "outputInfo" CostGraphDef'Node
    +         CostGraphDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'outputInfo
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'outputInfo = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "temporaryMemorySize" CostGraphDef'Node =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "temporaryMemorySize"
    +         CostGraphDef'Node CostGraphDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens
    +              _CostGraphDef'Node'temporaryMemorySize
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'temporaryMemorySize = y__})
    +
    +type instance Data.ProtoLens.Field "computeCost" CostGraphDef'Node
    +     = Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "computeCost" CostGraphDef'Node
    +         CostGraphDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'computeCost
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'computeCost = y__})
    +
    +type instance Data.ProtoLens.Field "isFinal" CostGraphDef'Node =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "isFinal" CostGraphDef'Node
    +         CostGraphDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'isFinal
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'isFinal = y__})
    +
    +type instance Data.ProtoLens.Field "controlInput" CostGraphDef'Node
    +     = [Data.Int.Int32]
    +
    +instance Data.ProtoLens.HasField "controlInput" CostGraphDef'Node
    +         CostGraphDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'controlInput
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'controlInput = y__})
    +
    +instance Data.Default.Class.Default CostGraphDef'Node where
    +        def
    +          = CostGraphDef'Node{_CostGraphDef'Node'name =
    +                                Data.ProtoLens.fieldDefault,
    +                              _CostGraphDef'Node'device = Data.ProtoLens.fieldDefault,
    +                              _CostGraphDef'Node'id = Data.ProtoLens.fieldDefault,
    +                              _CostGraphDef'Node'inputInfo = [],
    +                              _CostGraphDef'Node'outputInfo = [],
    +                              _CostGraphDef'Node'temporaryMemorySize =
    +                                Data.ProtoLens.fieldDefault,
    +                              _CostGraphDef'Node'computeCost = Data.ProtoLens.fieldDefault,
    +                              _CostGraphDef'Node'isFinal = Data.ProtoLens.fieldDefault,
    +                              _CostGraphDef'Node'controlInput = []}
    +
    +instance Data.ProtoLens.Message CostGraphDef'Node where
    +        descriptor
    +          = let name__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    +                device__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "device"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
    +                id__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "id"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional id)
    +                inputInfo__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "input_info"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor CostGraphDef'Node'InputInfo)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked inputInfo)
    +                outputInfo__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "output_info"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor CostGraphDef'Node'OutputInfo)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked outputInfo)
    +                temporaryMemorySize__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "temporary_memory_size"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         temporaryMemorySize)
    +                computeCost__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "compute_cost"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional computeCost)
    +                isFinal__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "is_final"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isFinal)
    +                controlInput__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "control_input"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked controlInput)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, device__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, id__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, inputInfo__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, outputInfo__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, temporaryMemorySize__field_descriptor),
    +                    (Data.ProtoLens.Tag 9, computeCost__field_descriptor),
    +                    (Data.ProtoLens.Tag 7, isFinal__field_descriptor),
    +                    (Data.ProtoLens.Tag 8, controlInput__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("name", name__field_descriptor),
    +                    ("device", device__field_descriptor), ("id", id__field_descriptor),
    +                    ("input_info", inputInfo__field_descriptor),
    +                    ("output_info", outputInfo__field_descriptor),
    +                    ("temporary_memory_size", temporaryMemorySize__field_descriptor),
    +                    ("compute_cost", computeCost__field_descriptor),
    +                    ("is_final", isFinal__field_descriptor),
    +                    ("control_input", controlInput__field_descriptor)])
    +
    +data CostGraphDef'Node'InputInfo = CostGraphDef'Node'InputInfo{_CostGraphDef'Node'InputInfo'precedingNode
    +                                                               :: Data.Int.Int32,
    +                                                               _CostGraphDef'Node'InputInfo'precedingPort
    +                                                               :: Data.Int.Int32}
    +                                 deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance
    +     Data.ProtoLens.Field "precedingNode" CostGraphDef'Node'InputInfo =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "precedingNode"
    +         CostGraphDef'Node'InputInfo CostGraphDef'Node'InputInfo where
    +        field _
    +          = Lens.Family2.Unchecked.lens
    +              _CostGraphDef'Node'InputInfo'precedingNode
    +              (\ x__ y__ ->
    +                 x__{_CostGraphDef'Node'InputInfo'precedingNode = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "precedingPort" CostGraphDef'Node'InputInfo =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "precedingPort"
    +         CostGraphDef'Node'InputInfo CostGraphDef'Node'InputInfo where
    +        field _
    +          = Lens.Family2.Unchecked.lens
    +              _CostGraphDef'Node'InputInfo'precedingPort
    +              (\ x__ y__ ->
    +                 x__{_CostGraphDef'Node'InputInfo'precedingPort = y__})
    +
    +instance Data.Default.Class.Default CostGraphDef'Node'InputInfo
    +         where
    +        def
    +          = CostGraphDef'Node'InputInfo{_CostGraphDef'Node'InputInfo'precedingNode
    +                                          = Data.ProtoLens.fieldDefault,
    +                                        _CostGraphDef'Node'InputInfo'precedingPort =
    +                                          Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message CostGraphDef'Node'InputInfo where
    +        descriptor
    +          = let precedingNode__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "preceding_node"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional precedingNode)
    +                precedingPort__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "preceding_port"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional precedingPort)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, precedingNode__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, precedingPort__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("preceding_node", precedingNode__field_descriptor),
    +                    ("preceding_port", precedingPort__field_descriptor)])
    +
    +data CostGraphDef'Node'OutputInfo = CostGraphDef'Node'OutputInfo{_CostGraphDef'Node'OutputInfo'size
    +                                                                 :: Data.Int.Int64,
    +                                                                 _CostGraphDef'Node'OutputInfo'aliasInputPort
    +                                                                 :: Data.Int.Int64,
    +                                                                 _CostGraphDef'Node'OutputInfo'shape
    +                                                                 ::
    +                                                                 Prelude.Maybe
    +                                                                   Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
    +                                                                 _CostGraphDef'Node'OutputInfo'dtype
    +                                                                 ::
    +                                                                 Proto.Tensorflow.Core.Framework.Types.DataType}
    +                                  deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance
    +     Data.ProtoLens.Field "size" CostGraphDef'Node'OutputInfo =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "size"
    +         CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'OutputInfo'size
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'OutputInfo'size = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "aliasInputPort" CostGraphDef'Node'OutputInfo
    +     = Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "aliasInputPort"
    +         CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo where
    +        field _
    +          = Lens.Family2.Unchecked.lens
    +              _CostGraphDef'Node'OutputInfo'aliasInputPort
    +              (\ x__ y__ ->
    +                 x__{_CostGraphDef'Node'OutputInfo'aliasInputPort = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "shape" CostGraphDef'Node'OutputInfo =
    +     Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    +
    +instance Data.ProtoLens.HasField "shape"
    +         CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo where
    +        field _
    +          = (Prelude..) maybe'shape
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance
    +     Data.ProtoLens.Field "maybe'shape" CostGraphDef'Node'OutputInfo =
    +     Prelude.Maybe
    +       Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    +
    +instance Data.ProtoLens.HasField "maybe'shape"
    +         CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'OutputInfo'shape
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'OutputInfo'shape = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "dtype" CostGraphDef'Node'OutputInfo =
    +     Proto.Tensorflow.Core.Framework.Types.DataType
    +
    +instance Data.ProtoLens.HasField "dtype"
    +         CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo where
    +        field _
    +          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'OutputInfo'dtype
    +              (\ x__ y__ -> x__{_CostGraphDef'Node'OutputInfo'dtype = y__})
    +
    +instance Data.Default.Class.Default CostGraphDef'Node'OutputInfo
    +         where
    +        def
    +          = CostGraphDef'Node'OutputInfo{_CostGraphDef'Node'OutputInfo'size =
    +                                           Data.ProtoLens.fieldDefault,
    +                                         _CostGraphDef'Node'OutputInfo'aliasInputPort =
    +                                           Data.ProtoLens.fieldDefault,
    +                                         _CostGraphDef'Node'OutputInfo'shape = Prelude.Nothing,
    +                                         _CostGraphDef'Node'OutputInfo'dtype =
    +                                           Data.Default.Class.def}
    +
    +instance Data.ProtoLens.Message CostGraphDef'Node'OutputInfo where
    +        descriptor
    +          = let size__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "size"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional size)
    +                aliasInputPort__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "alias_input_port"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional aliasInputPort)
    +                shape__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "shape"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
    +                      (Data.ProtoLens.OptionalField maybe'shape)
    +                dtype__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "dtype"
    +                      (Data.ProtoLens.EnumField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Types.DataType)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, size__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, aliasInputPort__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, shape__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, dtype__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("size", size__field_descriptor),
    +                    ("alias_input_port", aliasInputPort__field_descriptor),
    +                    ("shape", shape__field_descriptor),
    +                    ("dtype", dtype__field_descriptor)])
    +
    +aliasInputPort ::
    +               forall msg msg' .
    +                 Data.ProtoLens.HasField "aliasInputPort" msg msg' =>
    +                 Lens.Family2.Lens msg msg'
    +                   (Data.ProtoLens.Field "aliasInputPort" msg)
    +                   (Data.ProtoLens.Field "aliasInputPort" msg')
    +aliasInputPort
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "aliasInputPort")
    +
    +computeCost ::
    +            forall msg msg' . Data.ProtoLens.HasField "computeCost" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "computeCost" msg)
    +                (Data.ProtoLens.Field "computeCost" msg')
    +computeCost
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "computeCost")
    +
    +controlInput ::
    +             forall msg msg' .
    +               Data.ProtoLens.HasField "controlInput" msg msg' =>
    +               Lens.Family2.Lens msg msg'
    +                 (Data.ProtoLens.Field "controlInput" msg)
    +                 (Data.ProtoLens.Field "controlInput" msg')
    +controlInput
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "controlInput")
    +
    +device ::
    +       forall msg msg' . Data.ProtoLens.HasField "device" msg msg' =>
    +         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "device" msg)
    +           (Data.ProtoLens.Field "device" msg')
    +device
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "device")
    +
    +dtype ::
    +      forall msg msg' . Data.ProtoLens.HasField "dtype" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dtype" msg)
    +          (Data.ProtoLens.Field "dtype" msg')
    +dtype
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dtype")
    +
    +id ::
    +   forall msg msg' . Data.ProtoLens.HasField "id" msg msg' =>
    +     Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "id" msg)
    +       (Data.ProtoLens.Field "id" msg')
    +id
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "id")
    +
    +inputInfo ::
    +          forall msg msg' . Data.ProtoLens.HasField "inputInfo" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "inputInfo" msg)
    +              (Data.ProtoLens.Field "inputInfo" msg')
    +inputInfo
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "inputInfo")
    +
    +isFinal ::
    +        forall msg msg' . Data.ProtoLens.HasField "isFinal" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "isFinal" msg)
    +            (Data.ProtoLens.Field "isFinal" msg')
    +isFinal
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "isFinal")
    +
    +maybe'shape ::
    +            forall msg msg' . Data.ProtoLens.HasField "maybe'shape" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'shape" msg)
    +                (Data.ProtoLens.Field "maybe'shape" msg')
    +maybe'shape
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'shape")
    +
    +name ::
    +     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    +         (Data.ProtoLens.Field "name" msg')
    +name
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    +
    +node ::
    +     forall msg msg' . Data.ProtoLens.HasField "node" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "node" msg)
    +         (Data.ProtoLens.Field "node" msg')
    +node
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "node")
    +
    +outputInfo ::
    +           forall msg msg' . Data.ProtoLens.HasField "outputInfo" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "outputInfo" msg)
    +               (Data.ProtoLens.Field "outputInfo" msg')
    +outputInfo
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "outputInfo")
    +
    +precedingNode ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "precedingNode" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "precedingNode" msg)
    +                  (Data.ProtoLens.Field "precedingNode" msg')
    +precedingNode
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "precedingNode")
    +
    +precedingPort ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "precedingPort" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "precedingPort" msg)
    +                  (Data.ProtoLens.Field "precedingPort" msg')
    +precedingPort
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "precedingPort")
    +
    +shape ::
    +      forall msg msg' . Data.ProtoLens.HasField "shape" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "shape" msg)
    +          (Data.ProtoLens.Field "shape" msg')
    +shape
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "shape")
    +
    +size ::
    +     forall msg msg' . Data.ProtoLens.HasField "size" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "size" msg)
    +         (Data.ProtoLens.Field "size" msg')
    +size
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "size")
    +
    +temporaryMemorySize ::
    +                    forall msg msg' .
    +                      Data.ProtoLens.HasField "temporaryMemorySize" msg msg' =>
    +                      Lens.Family2.Lens msg msg'
    +                        (Data.ProtoLens.Field "temporaryMemorySize" msg)
    +                        (Data.ProtoLens.Field "temporaryMemorySize" msg')
    +temporaryMemorySize
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "temporaryMemorySize")
    +
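As with the other generated modules, the repeated and scalar fields compose with ordinary lens functions. A minimal sketch of reading through the CostGraphDef lenses above (illustrative, not part of the patch; the helper name is hypothetical), assuming Lens.Family2.view:

    import qualified Data.Text as Text
    import qualified Lens.Family2 as Lens

    -- Collect the name of every node in a cost graph: "node" yields the
    -- repeated CostGraphDef'Node list, "name" reads each node's label.
    nodeNames :: CostGraphDef -> [Text.Text]
    nodeNames g = map (Lens.view name) (Lens.view node g)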
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Function.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Function.html
new file mode 100644
index 0000000..7a2dcbf
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Function.html
@@ -0,0 +1,586 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/Function.hs
+{- This file was auto-generated from tensorflow/core/framework/function.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.Function where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +import qualified Proto.Tensorflow.Core.Framework.AttrValue
    +import qualified Proto.Tensorflow.Core.Framework.NodeDef
    +import qualified Proto.Tensorflow.Core.Framework.OpDef
    +
    +data FunctionDef = FunctionDef{_FunctionDef'signature ::
    +                               Prelude.Maybe Proto.Tensorflow.Core.Framework.OpDef.OpDef,
    +                               _FunctionDef'node :: [FunctionDef'Node],
    +                               _FunctionDef'nodeDef ::
    +                               [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef],
    +                               _FunctionDef'ret :: Data.Map.Map Data.Text.Text Data.Text.Text}
    +                 deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "signature" FunctionDef =
    +     Proto.Tensorflow.Core.Framework.OpDef.OpDef
    +
    +instance Data.ProtoLens.HasField "signature" FunctionDef
    +         FunctionDef where
    +        field _
    +          = (Prelude..) maybe'signature
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'signature" FunctionDef =
    +     Prelude.Maybe Proto.Tensorflow.Core.Framework.OpDef.OpDef
    +
    +instance Data.ProtoLens.HasField "maybe'signature" FunctionDef
    +         FunctionDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'signature
    +              (\ x__ y__ -> x__{_FunctionDef'signature = y__})
    +
    +type instance Data.ProtoLens.Field "node" FunctionDef =
    +     [FunctionDef'Node]
    +
    +instance Data.ProtoLens.HasField "node" FunctionDef FunctionDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'node
    +              (\ x__ y__ -> x__{_FunctionDef'node = y__})
    +
    +type instance Data.ProtoLens.Field "nodeDef" FunctionDef =
    +     [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef]
    +
    +instance Data.ProtoLens.HasField "nodeDef" FunctionDef FunctionDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'nodeDef
    +              (\ x__ y__ -> x__{_FunctionDef'nodeDef = y__})
    +
    +type instance Data.ProtoLens.Field "ret" FunctionDef =
    +     Data.Map.Map Data.Text.Text Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "ret" FunctionDef FunctionDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'ret
    +              (\ x__ y__ -> x__{_FunctionDef'ret = y__})
    +
    +instance Data.Default.Class.Default FunctionDef where
    +        def
    +          = FunctionDef{_FunctionDef'signature = Prelude.Nothing,
    +                        _FunctionDef'node = [], _FunctionDef'nodeDef = [],
    +                        _FunctionDef'ret = Data.Map.empty}
    +
    +instance Data.ProtoLens.Message FunctionDef where
    +        descriptor
    +          = let signature__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "signature"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.OpDef.OpDef)
    +                      (Data.ProtoLens.OptionalField maybe'signature)
    +                node__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "node"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor FunctionDef'Node)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked node)
    +                nodeDef__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "node_def"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.NodeDef.NodeDef)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked nodeDef)
    +                ret__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "ret"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor FunctionDef'RetEntry)
    +                      (Data.ProtoLens.MapField key value ret)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, signature__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, node__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, nodeDef__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, ret__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("signature", signature__field_descriptor),
    +                    ("node", node__field_descriptor),
    +                    ("node_def", nodeDef__field_descriptor),
    +                    ("ret", ret__field_descriptor)])
    +
    +data FunctionDef'Node = FunctionDef'Node{_FunctionDef'Node'ret ::
    +                                         [Data.Text.Text],
    +                                         _FunctionDef'Node'op :: Data.Text.Text,
    +                                         _FunctionDef'Node'arg :: [Data.Text.Text],
    +                                         _FunctionDef'Node'dep :: [Data.Text.Text],
    +                                         _FunctionDef'Node'attr ::
    +                                         Data.Map.Map Data.Text.Text
    +                                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    +                      deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "ret" FunctionDef'Node =
    +     [Data.Text.Text]
    +
    +instance Data.ProtoLens.HasField "ret" FunctionDef'Node
    +         FunctionDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'Node'ret
    +              (\ x__ y__ -> x__{_FunctionDef'Node'ret = y__})
    +
    +type instance Data.ProtoLens.Field "op" FunctionDef'Node =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "op" FunctionDef'Node
    +         FunctionDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'Node'op
    +              (\ x__ y__ -> x__{_FunctionDef'Node'op = y__})
    +
    +type instance Data.ProtoLens.Field "arg" FunctionDef'Node =
    +     [Data.Text.Text]
    +
    +instance Data.ProtoLens.HasField "arg" FunctionDef'Node
    +         FunctionDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'Node'arg
    +              (\ x__ y__ -> x__{_FunctionDef'Node'arg = y__})
    +
    +type instance Data.ProtoLens.Field "dep" FunctionDef'Node =
    +     [Data.Text.Text]
    +
    +instance Data.ProtoLens.HasField "dep" FunctionDef'Node
    +         FunctionDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'Node'dep
    +              (\ x__ y__ -> x__{_FunctionDef'Node'dep = y__})
    +
    +type instance Data.ProtoLens.Field "attr" FunctionDef'Node =
    +     Data.Map.Map Data.Text.Text
    +       Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    +
    +instance Data.ProtoLens.HasField "attr" FunctionDef'Node
    +         FunctionDef'Node where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'Node'attr
    +              (\ x__ y__ -> x__{_FunctionDef'Node'attr = y__})
    +
    +instance Data.Default.Class.Default FunctionDef'Node where
    +        def
    +          = FunctionDef'Node{_FunctionDef'Node'ret = [],
    +                             _FunctionDef'Node'op = Data.ProtoLens.fieldDefault,
    +                             _FunctionDef'Node'arg = [], _FunctionDef'Node'dep = [],
    +                             _FunctionDef'Node'attr = Data.Map.empty}
    +
    +instance Data.ProtoLens.Message FunctionDef'Node where
    +        descriptor
    +          = let ret__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "ret"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked ret)
    +                op__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "op"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional op)
    +                arg__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "arg"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked arg)
    +                dep__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "dep"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked dep)
    +                attr__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "attr"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor FunctionDef'Node'AttrEntry)
    +                      (Data.ProtoLens.MapField key value attr)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, ret__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, op__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, arg__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, dep__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, attr__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("ret", ret__field_descriptor), ("op", op__field_descriptor),
    +                    ("arg", arg__field_descriptor), ("dep", dep__field_descriptor),
    +                    ("attr", attr__field_descriptor)])
    +
    +data FunctionDef'Node'AttrEntry = FunctionDef'Node'AttrEntry{_FunctionDef'Node'AttrEntry'key
    +                                                             :: Data.Text.Text,
    +                                                             _FunctionDef'Node'AttrEntry'value ::
    +                                                             Prelude.Maybe
    +                                                               Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    +                                deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "key" FunctionDef'Node'AttrEntry
    +     = Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "key" FunctionDef'Node'AttrEntry
    +         FunctionDef'Node'AttrEntry where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'Node'AttrEntry'key
    +              (\ x__ y__ -> x__{_FunctionDef'Node'AttrEntry'key = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "value" FunctionDef'Node'AttrEntry =
    +     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    +
    +instance Data.ProtoLens.HasField "value" FunctionDef'Node'AttrEntry
    +         FunctionDef'Node'AttrEntry where
    +        field _
    +          = (Prelude..) maybe'value
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance
    +     Data.ProtoLens.Field "maybe'value" FunctionDef'Node'AttrEntry =
    +     Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    +
    +instance Data.ProtoLens.HasField "maybe'value"
    +         FunctionDef'Node'AttrEntry FunctionDef'Node'AttrEntry where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'Node'AttrEntry'value
    +              (\ x__ y__ -> x__{_FunctionDef'Node'AttrEntry'value = y__})
    +
    +instance Data.Default.Class.Default FunctionDef'Node'AttrEntry
    +         where
    +        def
    +          = FunctionDef'Node'AttrEntry{_FunctionDef'Node'AttrEntry'key =
    +                                         Data.ProtoLens.fieldDefault,
    +                                       _FunctionDef'Node'AttrEntry'value = Prelude.Nothing}
    +
    +instance Data.ProtoLens.Message FunctionDef'Node'AttrEntry where
    +        descriptor
    +          = let key__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "key"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    +                value__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "value"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
    +                      (Data.ProtoLens.OptionalField maybe'value)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("key", key__field_descriptor),
    +                    ("value", value__field_descriptor)])
    +
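+-- Editorial sketch (not generator output): optional message fields get
+-- two lenses. "value" composes maybe'value with maybeLens def, so reads
+-- fall back to the default AttrValue when the field is unset, while
+-- "maybe'value" exposes the raw Prelude.Maybe. The accessor names below
+-- are illustrative; they assume only the imports already in this module
+-- and lens-family's view as re-exported through Lens.Family2.
+attrEntryValueOrDefault ::
+                        FunctionDef'Node'AttrEntry ->
+                          Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
+attrEntryValueOrDefault = Lens.Family2.view value
+
+attrEntryRawValue ::
+                  FunctionDef'Node'AttrEntry ->
+                    Prelude.Maybe
+                      Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
+attrEntryRawValue = Lens.Family2.view maybe'value
+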
    +data FunctionDef'RetEntry = FunctionDef'RetEntry{_FunctionDef'RetEntry'key
    +                                                 :: Data.Text.Text,
    +                                                 _FunctionDef'RetEntry'value :: Data.Text.Text}
    +                          deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "key" FunctionDef'RetEntry =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "key" FunctionDef'RetEntry
    +         FunctionDef'RetEntry where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'RetEntry'key
    +              (\ x__ y__ -> x__{_FunctionDef'RetEntry'key = y__})
    +
    +type instance Data.ProtoLens.Field "value" FunctionDef'RetEntry =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "value" FunctionDef'RetEntry
    +         FunctionDef'RetEntry where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDef'RetEntry'value
    +              (\ x__ y__ -> x__{_FunctionDef'RetEntry'value = y__})
    +
    +instance Data.Default.Class.Default FunctionDef'RetEntry where
    +        def
    +          = FunctionDef'RetEntry{_FunctionDef'RetEntry'key =
    +                                   Data.ProtoLens.fieldDefault,
    +                                 _FunctionDef'RetEntry'value = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message FunctionDef'RetEntry where
    +        descriptor
    +          = let key__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "key"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    +                value__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "value"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional value)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("key", key__field_descriptor),
    +                    ("value", value__field_descriptor)])
    +
    +data FunctionDefLibrary = FunctionDefLibrary{_FunctionDefLibrary'function
    +                                             :: [FunctionDef],
    +                                             _FunctionDefLibrary'gradient :: [GradientDef]}
    +                        deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "function" FunctionDefLibrary =
    +     [FunctionDef]
    +
    +instance Data.ProtoLens.HasField "function" FunctionDefLibrary
    +         FunctionDefLibrary where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDefLibrary'function
    +              (\ x__ y__ -> x__{_FunctionDefLibrary'function = y__})
    +
    +type instance Data.ProtoLens.Field "gradient" FunctionDefLibrary =
    +     [GradientDef]
    +
    +instance Data.ProtoLens.HasField "gradient" FunctionDefLibrary
    +         FunctionDefLibrary where
    +        field _
    +          = Lens.Family2.Unchecked.lens _FunctionDefLibrary'gradient
    +              (\ x__ y__ -> x__{_FunctionDefLibrary'gradient = y__})
    +
    +instance Data.Default.Class.Default FunctionDefLibrary where
    +        def
    +          = FunctionDefLibrary{_FunctionDefLibrary'function = [],
    +                               _FunctionDefLibrary'gradient = []}
    +
    +instance Data.ProtoLens.Message FunctionDefLibrary where
    +        descriptor
    +          = let function__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "function"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor FunctionDef)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked function)
    +                gradient__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "gradient"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor GradientDef)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked gradient)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, function__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, gradient__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("function", function__field_descriptor),
    +                    ("gradient", gradient__field_descriptor)])
    +
    +data GradientDef = GradientDef{_GradientDef'functionName ::
    +                               Data.Text.Text,
    +                               _GradientDef'gradientFunc :: Data.Text.Text}
    +                 deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "functionName" GradientDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "functionName" GradientDef
    +         GradientDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GradientDef'functionName
    +              (\ x__ y__ -> x__{_GradientDef'functionName = y__})
    +
    +type instance Data.ProtoLens.Field "gradientFunc" GradientDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "gradientFunc" GradientDef
    +         GradientDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GradientDef'gradientFunc
    +              (\ x__ y__ -> x__{_GradientDef'gradientFunc = y__})
    +
    +instance Data.Default.Class.Default GradientDef where
    +        def
    +          = GradientDef{_GradientDef'functionName =
    +                          Data.ProtoLens.fieldDefault,
    +                        _GradientDef'gradientFunc = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message GradientDef where
    +        descriptor
    +          = let functionName__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "function_name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional functionName)
    +                gradientFunc__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "gradient_func"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional gradientFunc)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, functionName__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, gradientFunc__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("function_name", functionName__field_descriptor),
    +                    ("gradient_func", gradientFunc__field_descriptor)])
    +
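+-- Editorial sketch (not generator output): a GradientDef pairs a
+-- function's name with the name of its gradient function. A hypothetical
+-- builder from the default message, using the lenses defined below:
+mkGradientDef :: Data.Text.Text -> Data.Text.Text -> GradientDef
+mkGradientDef fn gf
+  = Lens.Family2.set functionName fn
+      (Lens.Family2.set gradientFunc gf Data.Default.Class.def)
+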
    +arg ::
    +    forall msg msg' . Data.ProtoLens.HasField "arg" msg msg' =>
    +      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "arg" msg)
    +        (Data.ProtoLens.Field "arg" msg')
    +arg
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "arg")
    +
    +attr ::
    +     forall msg msg' . Data.ProtoLens.HasField "attr" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "attr" msg)
    +         (Data.ProtoLens.Field "attr" msg')
    +attr
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "attr")
    +
    +dep ::
    +    forall msg msg' . Data.ProtoLens.HasField "dep" msg msg' =>
    +      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dep" msg)
    +        (Data.ProtoLens.Field "dep" msg')
    +dep
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dep")
    +
    +function ::
    +         forall msg msg' . Data.ProtoLens.HasField "function" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "function" msg)
    +             (Data.ProtoLens.Field "function" msg')
    +function
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "function")
    +
    +functionName ::
    +             forall msg msg' .
    +               Data.ProtoLens.HasField "functionName" msg msg' =>
    +               Lens.Family2.Lens msg msg'
    +                 (Data.ProtoLens.Field "functionName" msg)
    +                 (Data.ProtoLens.Field "functionName" msg')
    +functionName
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "functionName")
    +
    +gradient ::
    +         forall msg msg' . Data.ProtoLens.HasField "gradient" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "gradient" msg)
    +             (Data.ProtoLens.Field "gradient" msg')
    +gradient
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "gradient")
    +
    +gradientFunc ::
    +             forall msg msg' .
    +               Data.ProtoLens.HasField "gradientFunc" msg msg' =>
    +               Lens.Family2.Lens msg msg'
    +                 (Data.ProtoLens.Field "gradientFunc" msg)
    +                 (Data.ProtoLens.Field "gradientFunc" msg')
    +gradientFunc
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "gradientFunc")
    +
    +key ::
    +    forall msg msg' . Data.ProtoLens.HasField "key" msg msg' =>
    +      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "key" msg)
    +        (Data.ProtoLens.Field "key" msg')
    +key
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "key")
    +
    +maybe'signature ::
    +                forall msg msg' .
    +                  Data.ProtoLens.HasField "maybe'signature" msg msg' =>
    +                  Lens.Family2.Lens msg msg'
    +                    (Data.ProtoLens.Field "maybe'signature" msg)
    +                    (Data.ProtoLens.Field "maybe'signature" msg')
    +maybe'signature
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'signature")
    +
    +maybe'value ::
    +            forall msg msg' . Data.ProtoLens.HasField "maybe'value" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'value" msg)
    +                (Data.ProtoLens.Field "maybe'value" msg')
    +maybe'value
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'value")
    +
    +node ::
    +     forall msg msg' . Data.ProtoLens.HasField "node" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "node" msg)
    +         (Data.ProtoLens.Field "node" msg')
    +node
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "node")
    +
    +nodeDef ::
    +        forall msg msg' . Data.ProtoLens.HasField "nodeDef" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "nodeDef" msg)
    +            (Data.ProtoLens.Field "nodeDef" msg')
    +nodeDef
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "nodeDef")
    +
    +op ::
    +   forall msg msg' . Data.ProtoLens.HasField "op" msg msg' =>
    +     Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "op" msg)
    +       (Data.ProtoLens.Field "op" msg')
    +op
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "op")
    +
    +ret ::
    +    forall msg msg' . Data.ProtoLens.HasField "ret" msg msg' =>
    +      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "ret" msg)
    +        (Data.ProtoLens.Field "ret" msg')
    +ret
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "ret")
    +
    +signature ::
    +          forall msg msg' . Data.ProtoLens.HasField "signature" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "signature" msg)
    +              (Data.ProtoLens.Field "signature" msg')
    +signature
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "signature")
    +
    +value ::
    +      forall msg msg' . Data.ProtoLens.HasField "value" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "value" msg)
    +          (Data.ProtoLens.Field "value" msg')
    +value
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "value")
    +
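+-- Editorial sketch (not generator output): the Message instances above
+-- are what drive (de)serialization. Assuming proto-lens's
+-- encodeMessage/decodeMessage are available via the Data.ProtoLens
+-- re-export, a wire-format round trip would look like:
+roundTripLibrary ::
+                 FunctionDefLibrary ->
+                   Prelude.Either Prelude.String FunctionDefLibrary
+roundTripLibrary
+  = (Prelude..) Data.ProtoLens.decodeMessage Data.ProtoLens.encodeMessage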
+
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Graph.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Graph.html
new file mode 100644
index 0000000..b6ab688
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Graph.html
@@ -0,0 +1,198 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/Graph.hs
+{- This file was auto-generated from tensorflow/core/framework/graph.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.Graph where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +import qualified Proto.Tensorflow.Core.Framework.Function
    +import qualified Proto.Tensorflow.Core.Framework.NodeDef
    +import qualified Proto.Tensorflow.Core.Framework.Versions
    +
    +data GraphDef = GraphDef{_GraphDef'node ::
    +                         [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef],
    +                         _GraphDef'versions ::
    +                         Prelude.Maybe Proto.Tensorflow.Core.Framework.Versions.VersionDef,
    +                         _GraphDef'version :: Data.Int.Int32,
    +                         _GraphDef'library ::
    +                         Prelude.Maybe
    +                           Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary}
    +              deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "node" GraphDef =
    +     [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef]
    +
    +instance Data.ProtoLens.HasField "node" GraphDef GraphDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphDef'node
    +              (\ x__ y__ -> x__{_GraphDef'node = y__})
    +
    +type instance Data.ProtoLens.Field "versions" GraphDef =
    +     Proto.Tensorflow.Core.Framework.Versions.VersionDef
    +
    +instance Data.ProtoLens.HasField "versions" GraphDef GraphDef where
    +        field _
    +          = (Prelude..) maybe'versions
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'versions" GraphDef =
    +     Prelude.Maybe Proto.Tensorflow.Core.Framework.Versions.VersionDef
    +
    +instance Data.ProtoLens.HasField "maybe'versions" GraphDef GraphDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphDef'versions
    +              (\ x__ y__ -> x__{_GraphDef'versions = y__})
    +
    +type instance Data.ProtoLens.Field "version" GraphDef =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "version" GraphDef GraphDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphDef'version
    +              (\ x__ y__ -> x__{_GraphDef'version = y__})
    +
    +type instance Data.ProtoLens.Field "library" GraphDef =
    +     Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary
    +
    +instance Data.ProtoLens.HasField "library" GraphDef GraphDef where
    +        field _
    +          = (Prelude..) maybe'library
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'library" GraphDef =
    +     Prelude.Maybe
    +       Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary
    +
    +instance Data.ProtoLens.HasField "maybe'library" GraphDef GraphDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphDef'library
    +              (\ x__ y__ -> x__{_GraphDef'library = y__})
    +
    +instance Data.Default.Class.Default GraphDef where
    +        def
    +          = GraphDef{_GraphDef'node = [],
    +                     _GraphDef'versions = Prelude.Nothing,
    +                     _GraphDef'version = Data.ProtoLens.fieldDefault,
    +                     _GraphDef'library = Prelude.Nothing}
    +
    +instance Data.ProtoLens.Message GraphDef where
    +        descriptor
    +          = let node__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "node"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.NodeDef.NodeDef)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked node)
    +                versions__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "versions"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Versions.VersionDef)
    +                      (Data.ProtoLens.OptionalField maybe'versions)
    +                version__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "version"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional version)
    +                library__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "library"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary)
    +                      (Data.ProtoLens.OptionalField maybe'library)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, node__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, versions__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, version__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, library__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("node", node__field_descriptor),
    +                    ("versions", versions__field_descriptor),
    +                    ("version", version__field_descriptor),
    +                    ("library", library__field_descriptor)])
    +
    +library ::
    +        forall msg msg' . Data.ProtoLens.HasField "library" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "library" msg)
    +            (Data.ProtoLens.Field "library" msg')
    +library
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "library")
    +
    +maybe'library ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "maybe'library" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "maybe'library" msg)
    +                  (Data.ProtoLens.Field "maybe'library" msg')
    +maybe'library
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'library")
    +
    +maybe'versions ::
    +               forall msg msg' .
    +                 Data.ProtoLens.HasField "maybe'versions" msg msg' =>
    +                 Lens.Family2.Lens msg msg'
    +                   (Data.ProtoLens.Field "maybe'versions" msg)
    +                   (Data.ProtoLens.Field "maybe'versions" msg')
    +maybe'versions
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'versions")
    +
    +node ::
    +     forall msg msg' . Data.ProtoLens.HasField "node" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "node" msg)
    +         (Data.ProtoLens.Field "node" msg')
    +node
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "node")
    +
    +version ::
    +        forall msg msg' . Data.ProtoLens.HasField "version" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "version" msg)
    +            (Data.ProtoLens.Field "version" msg')
    +version
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "version")
    +
    +versions ::
    +         forall msg msg' . Data.ProtoLens.HasField "versions" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "versions" msg)
    +             (Data.ProtoLens.Field "versions" msg')
    +versions
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "versions")
    +
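+-- Editorial sketch (not generator output): assembling a GraphDef from
+-- the default message with the lenses above. Note "version" is the
+-- plain Int32 field (tag 3), distinct from the "versions" submessage
+-- (tag 4). graphWithNodes is an illustrative name, not part of the API.
+graphWithNodes ::
+               [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef] -> GraphDef
+graphWithNodes ns = Lens.Family2.set node ns Data.Default.Class.def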
+
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-NodeDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-NodeDef.html
new file mode 100644
index 0000000..a91bb4a
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-NodeDef.html
@@ -0,0 +1,257 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/NodeDef.hs
+{- This file was auto-generated from tensorflow/core/framework/node_def.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.NodeDef where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +import qualified Proto.Tensorflow.Core.Framework.AttrValue
    +
    +data NodeDef = NodeDef{_NodeDef'name :: Data.Text.Text,
    +                       _NodeDef'op :: Data.Text.Text, _NodeDef'input :: [Data.Text.Text],
    +                       _NodeDef'device :: Data.Text.Text,
    +                       _NodeDef'attr ::
    +                       Data.Map.Map Data.Text.Text
    +                         Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    +             deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "name" NodeDef = Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "name" NodeDef NodeDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeDef'name
    +              (\ x__ y__ -> x__{_NodeDef'name = y__})
    +
    +type instance Data.ProtoLens.Field "op" NodeDef = Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "op" NodeDef NodeDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeDef'op
    +              (\ x__ y__ -> x__{_NodeDef'op = y__})
    +
    +type instance Data.ProtoLens.Field "input" NodeDef =
    +     [Data.Text.Text]
    +
    +instance Data.ProtoLens.HasField "input" NodeDef NodeDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeDef'input
    +              (\ x__ y__ -> x__{_NodeDef'input = y__})
    +
    +type instance Data.ProtoLens.Field "device" NodeDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "device" NodeDef NodeDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeDef'device
    +              (\ x__ y__ -> x__{_NodeDef'device = y__})
    +
    +type instance Data.ProtoLens.Field "attr" NodeDef =
    +     Data.Map.Map Data.Text.Text
    +       Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    +
    +instance Data.ProtoLens.HasField "attr" NodeDef NodeDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeDef'attr
    +              (\ x__ y__ -> x__{_NodeDef'attr = y__})
    +
    +instance Data.Default.Class.Default NodeDef where
    +        def
    +          = NodeDef{_NodeDef'name = Data.ProtoLens.fieldDefault,
    +                    _NodeDef'op = Data.ProtoLens.fieldDefault, _NodeDef'input = [],
    +                    _NodeDef'device = Data.ProtoLens.fieldDefault,
    +                    _NodeDef'attr = Data.Map.empty}
    +
    +instance Data.ProtoLens.Message NodeDef where
    +        descriptor
    +          = let name__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    +                op__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "op"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional op)
    +                input__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "input"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked input)
    +                device__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "device"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
    +                attr__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "attr"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor NodeDef'AttrEntry)
    +                      (Data.ProtoLens.MapField key value attr)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, op__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, input__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, device__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, attr__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("name", name__field_descriptor), ("op", op__field_descriptor),
    +                    ("input", input__field_descriptor),
    +                    ("device", device__field_descriptor),
    +                    ("attr", attr__field_descriptor)])
    +
    +data NodeDef'AttrEntry = NodeDef'AttrEntry{_NodeDef'AttrEntry'key
    +                                           :: Data.Text.Text,
    +                                           _NodeDef'AttrEntry'value ::
    +                                           Prelude.Maybe
    +                                             Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    +                       deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "key" NodeDef'AttrEntry =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "key" NodeDef'AttrEntry
    +         NodeDef'AttrEntry where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeDef'AttrEntry'key
    +              (\ x__ y__ -> x__{_NodeDef'AttrEntry'key = y__})
    +
    +type instance Data.ProtoLens.Field "value" NodeDef'AttrEntry =
    +     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    +
    +instance Data.ProtoLens.HasField "value" NodeDef'AttrEntry
    +         NodeDef'AttrEntry where
    +        field _
    +          = (Prelude..) maybe'value
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'value" NodeDef'AttrEntry
    +     = Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    +
    +instance Data.ProtoLens.HasField "maybe'value" NodeDef'AttrEntry
    +         NodeDef'AttrEntry where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeDef'AttrEntry'value
    +              (\ x__ y__ -> x__{_NodeDef'AttrEntry'value = y__})
    +
    +instance Data.Default.Class.Default NodeDef'AttrEntry where
    +        def
    +          = NodeDef'AttrEntry{_NodeDef'AttrEntry'key =
    +                                Data.ProtoLens.fieldDefault,
    +                              _NodeDef'AttrEntry'value = Prelude.Nothing}
    +
    +instance Data.ProtoLens.Message NodeDef'AttrEntry where
    +        descriptor
    +          = let key__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "key"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    +                value__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "value"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
    +                      (Data.ProtoLens.OptionalField maybe'value)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("key", key__field_descriptor),
    +                    ("value", value__field_descriptor)])
    +
    +attr ::
    +     forall msg msg' . Data.ProtoLens.HasField "attr" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "attr" msg)
    +         (Data.ProtoLens.Field "attr" msg')
    +attr
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "attr")
    +
    +device ::
    +       forall msg msg' . Data.ProtoLens.HasField "device" msg msg' =>
    +         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "device" msg)
    +           (Data.ProtoLens.Field "device" msg')
    +device
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "device")
    +
    +input ::
    +      forall msg msg' . Data.ProtoLens.HasField "input" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "input" msg)
    +          (Data.ProtoLens.Field "input" msg')
    +input
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "input")
    +
    +key ::
    +    forall msg msg' . Data.ProtoLens.HasField "key" msg msg' =>
    +      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "key" msg)
    +        (Data.ProtoLens.Field "key" msg')
    +key
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "key")
    +
    +maybe'value ::
    +            forall msg msg' . Data.ProtoLens.HasField "maybe'value" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'value" msg)
    +                (Data.ProtoLens.Field "maybe'value" msg')
    +maybe'value
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'value")
    +
    +name ::
    +     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    +         (Data.ProtoLens.Field "name" msg')
    +name
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    +
    +op ::
    +   forall msg msg' . Data.ProtoLens.HasField "op" msg msg' =>
    +     Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "op" msg)
    +       (Data.ProtoLens.Field "op" msg')
    +op
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "op")
    +
    +value ::
    +      forall msg msg' . Data.ProtoLens.HasField "value" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "value" msg)
    +          (Data.ProtoLens.Field "value" msg')
    +value
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "value")
    +
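+-- Editorial sketch (not generator output): a minimal NodeDef built with
+-- the lenses above; the attr field takes a Data.Map keyed by attribute
+-- name, per the MapField descriptor. constNode is an illustrative name.
+constNode :: Data.Text.Text -> NodeDef
+constNode n
+  = Lens.Family2.set op (Data.Text.pack "Const")
+      (Lens.Family2.set name n Data.Default.Class.def)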
+
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-OpDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-OpDef.html
new file mode 100644
index 0000000..fe7a2c4
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-OpDef.html
@@ -0,0 +1,854 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/OpDef.hs
+{- This file was auto-generated from tensorflow/core/framework/op_def.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.OpDef where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +import qualified Proto.Tensorflow.Core.Framework.AttrValue
    +import qualified Proto.Tensorflow.Core.Framework.Types
    +
    +data OpDef = OpDef{_OpDef'name :: Data.Text.Text,
    +                   _OpDef'inputArg :: [OpDef'ArgDef],
    +                   _OpDef'outputArg :: [OpDef'ArgDef], _OpDef'attr :: [OpDef'AttrDef],
    +                   _OpDef'deprecation :: Prelude.Maybe OpDeprecation,
    +                   _OpDef'summary :: Data.Text.Text,
    +                   _OpDef'description :: Data.Text.Text,
    +                   _OpDef'isCommutative :: Prelude.Bool,
    +                   _OpDef'isAggregate :: Prelude.Bool,
    +                   _OpDef'isStateful :: Prelude.Bool,
    +                   _OpDef'allowsUninitializedInput :: Prelude.Bool}
    +           deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "name" OpDef = Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "name" OpDef OpDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'name
    +              (\ x__ y__ -> x__{_OpDef'name = y__})
    +
    +type instance Data.ProtoLens.Field "inputArg" OpDef =
    +     [OpDef'ArgDef]
    +
    +instance Data.ProtoLens.HasField "inputArg" OpDef OpDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'inputArg
    +              (\ x__ y__ -> x__{_OpDef'inputArg = y__})
    +
    +type instance Data.ProtoLens.Field "outputArg" OpDef =
    +     [OpDef'ArgDef]
    +
    +instance Data.ProtoLens.HasField "outputArg" OpDef OpDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'outputArg
    +              (\ x__ y__ -> x__{_OpDef'outputArg = y__})
    +
    +type instance Data.ProtoLens.Field "attr" OpDef = [OpDef'AttrDef]
    +
    +instance Data.ProtoLens.HasField "attr" OpDef OpDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'attr
    +              (\ x__ y__ -> x__{_OpDef'attr = y__})
    +
    +type instance Data.ProtoLens.Field "deprecation" OpDef =
    +     OpDeprecation
    +
    +instance Data.ProtoLens.HasField "deprecation" OpDef OpDef where
    +        field _
    +          = (Prelude..) maybe'deprecation
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'deprecation" OpDef =
    +     Prelude.Maybe OpDeprecation
    +
    +instance Data.ProtoLens.HasField "maybe'deprecation" OpDef OpDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'deprecation
    +              (\ x__ y__ -> x__{_OpDef'deprecation = y__})
    +
    +type instance Data.ProtoLens.Field "summary" OpDef = Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "summary" OpDef OpDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'summary
    +              (\ x__ y__ -> x__{_OpDef'summary = y__})
    +
    +type instance Data.ProtoLens.Field "description" OpDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "description" OpDef OpDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'description
    +              (\ x__ y__ -> x__{_OpDef'description = y__})
    +
    +type instance Data.ProtoLens.Field "isCommutative" OpDef =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "isCommutative" OpDef OpDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'isCommutative
    +              (\ x__ y__ -> x__{_OpDef'isCommutative = y__})
    +
    +type instance Data.ProtoLens.Field "isAggregate" OpDef =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "isAggregate" OpDef OpDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'isAggregate
    +              (\ x__ y__ -> x__{_OpDef'isAggregate = y__})
    +
    +type instance Data.ProtoLens.Field "isStateful" OpDef =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "isStateful" OpDef OpDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'isStateful
    +              (\ x__ y__ -> x__{_OpDef'isStateful = y__})
    +
    +type instance Data.ProtoLens.Field "allowsUninitializedInput" OpDef
    +     = Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "allowsUninitializedInput" OpDef
    +         OpDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'allowsUninitializedInput
    +              (\ x__ y__ -> x__{_OpDef'allowsUninitializedInput = y__})
    +
    +instance Data.Default.Class.Default OpDef where
    +        def
    +          = OpDef{_OpDef'name = Data.ProtoLens.fieldDefault,
    +                  _OpDef'inputArg = [], _OpDef'outputArg = [], _OpDef'attr = [],
    +                  _OpDef'deprecation = Prelude.Nothing,
    +                  _OpDef'summary = Data.ProtoLens.fieldDefault,
    +                  _OpDef'description = Data.ProtoLens.fieldDefault,
    +                  _OpDef'isCommutative = Data.ProtoLens.fieldDefault,
    +                  _OpDef'isAggregate = Data.ProtoLens.fieldDefault,
    +                  _OpDef'isStateful = Data.ProtoLens.fieldDefault,
    +                  _OpDef'allowsUninitializedInput = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message OpDef where
    +        descriptor
    +          = let name__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    +                inputArg__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "input_arg"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor OpDef'ArgDef)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked inputArg)
    +                outputArg__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "output_arg"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor OpDef'ArgDef)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked outputArg)
    +                attr__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "attr"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor OpDef'AttrDef)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked attr)
    +                deprecation__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "deprecation"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor OpDeprecation)
    +                      (Data.ProtoLens.OptionalField maybe'deprecation)
    +                summary__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "summary"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional summary)
    +                description__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "description"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional description)
    +                isCommutative__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "is_commutative"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isCommutative)
    +                isAggregate__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "is_aggregate"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isAggregate)
    +                isStateful__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "is_stateful"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isStateful)
    +                allowsUninitializedInput__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "allows_uninitialized_input"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         allowsUninitializedInput)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, inputArg__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, outputArg__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, attr__field_descriptor),
    +                    (Data.ProtoLens.Tag 8, deprecation__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, summary__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, description__field_descriptor),
    +                    (Data.ProtoLens.Tag 18, isCommutative__field_descriptor),
    +                    (Data.ProtoLens.Tag 16, isAggregate__field_descriptor),
    +                    (Data.ProtoLens.Tag 17, isStateful__field_descriptor),
    +                    (Data.ProtoLens.Tag 19,
    +                     allowsUninitializedInput__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("name", name__field_descriptor),
    +                    ("input_arg", inputArg__field_descriptor),
    +                    ("output_arg", outputArg__field_descriptor),
    +                    ("attr", attr__field_descriptor),
    +                    ("deprecation", deprecation__field_descriptor),
    +                    ("summary", summary__field_descriptor),
    +                    ("description", description__field_descriptor),
    +                    ("is_commutative", isCommutative__field_descriptor),
    +                    ("is_aggregate", isAggregate__field_descriptor),
    +                    ("is_stateful", isStateful__field_descriptor),
    +                    ("allows_uninitialized_input",
    +                     allowsUninitializedInput__field_descriptor)])
    +
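+-- Editorial sketch (not generator output): reading through the repeated
+-- input_arg field via the inputArg lens (defined later in this module)
+-- and the per-argument name lens. opInputNames is an illustrative name.
+opInputNames :: OpDef -> [Data.Text.Text]
+opInputNames o
+  = Prelude.map (Lens.Family2.view name) (Lens.Family2.view inputArg o)
+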
    +data OpDef'ArgDef = OpDef'ArgDef{_OpDef'ArgDef'name ::
    +                                 Data.Text.Text,
    +                                 _OpDef'ArgDef'description :: Data.Text.Text,
    +                                 _OpDef'ArgDef'type' ::
    +                                 Proto.Tensorflow.Core.Framework.Types.DataType,
    +                                 _OpDef'ArgDef'typeAttr :: Data.Text.Text,
    +                                 _OpDef'ArgDef'numberAttr :: Data.Text.Text,
    +                                 _OpDef'ArgDef'typeListAttr :: Data.Text.Text,
    +                                 _OpDef'ArgDef'isRef :: Prelude.Bool}
    +                  deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "name" OpDef'ArgDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "name" OpDef'ArgDef OpDef'ArgDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'name
    +              (\ x__ y__ -> x__{_OpDef'ArgDef'name = y__})
    +
    +type instance Data.ProtoLens.Field "description" OpDef'ArgDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "description" OpDef'ArgDef
    +         OpDef'ArgDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'description
    +              (\ x__ y__ -> x__{_OpDef'ArgDef'description = y__})
    +
    +type instance Data.ProtoLens.Field "type'" OpDef'ArgDef =
    +     Proto.Tensorflow.Core.Framework.Types.DataType
    +
    +instance Data.ProtoLens.HasField "type'" OpDef'ArgDef OpDef'ArgDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'type'
    +              (\ x__ y__ -> x__{_OpDef'ArgDef'type' = y__})
    +
    +type instance Data.ProtoLens.Field "typeAttr" OpDef'ArgDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "typeAttr" OpDef'ArgDef
    +         OpDef'ArgDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'typeAttr
    +              (\ x__ y__ -> x__{_OpDef'ArgDef'typeAttr = y__})
    +
    +type instance Data.ProtoLens.Field "numberAttr" OpDef'ArgDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "numberAttr" OpDef'ArgDef
    +         OpDef'ArgDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'numberAttr
    +              (\ x__ y__ -> x__{_OpDef'ArgDef'numberAttr = y__})
    +
    +type instance Data.ProtoLens.Field "typeListAttr" OpDef'ArgDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "typeListAttr" OpDef'ArgDef
    +         OpDef'ArgDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'typeListAttr
    +              (\ x__ y__ -> x__{_OpDef'ArgDef'typeListAttr = y__})
    +
    +type instance Data.ProtoLens.Field "isRef" OpDef'ArgDef =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "isRef" OpDef'ArgDef OpDef'ArgDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'isRef
    +              (\ x__ y__ -> x__{_OpDef'ArgDef'isRef = y__})
    +
    +instance Data.Default.Class.Default OpDef'ArgDef where
    +        def
    +          = OpDef'ArgDef{_OpDef'ArgDef'name = Data.ProtoLens.fieldDefault,
    +                         _OpDef'ArgDef'description = Data.ProtoLens.fieldDefault,
    +                         _OpDef'ArgDef'type' = Data.Default.Class.def,
    +                         _OpDef'ArgDef'typeAttr = Data.ProtoLens.fieldDefault,
    +                         _OpDef'ArgDef'numberAttr = Data.ProtoLens.fieldDefault,
    +                         _OpDef'ArgDef'typeListAttr = Data.ProtoLens.fieldDefault,
    +                         _OpDef'ArgDef'isRef = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message OpDef'ArgDef where
    +        descriptor
    +          = let name__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    +                description__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "description"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional description)
    +                type'__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "type"
    +                      (Data.ProtoLens.EnumField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Types.DataType)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional type')
    +                typeAttr__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "type_attr"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional typeAttr)
    +                numberAttr__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "number_attr"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numberAttr)
    +                typeListAttr__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "type_list_attr"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional typeListAttr)
    +                isRef__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "is_ref"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isRef)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, description__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, type'__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, typeAttr__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, numberAttr__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, typeListAttr__field_descriptor),
    +                    (Data.ProtoLens.Tag 16, isRef__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("name", name__field_descriptor),
    +                    ("description", description__field_descriptor),
    +                    ("type", type'__field_descriptor),
    +                    ("type_attr", typeAttr__field_descriptor),
    +                    ("number_attr", numberAttr__field_descriptor),
    +                    ("type_list_attr", typeListAttr__field_descriptor),
    +                    ("is_ref", isRef__field_descriptor)])
    +
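+-- Editorial sketch (not generator output): the proto field "type"
+-- collides with the Haskell keyword, so the generator appends a tick
+-- (type'); the descriptor above still registers the wire name "type".
+-- argDataType is an illustrative accessor built on that lens.
+argDataType ::
+            OpDef'ArgDef -> Proto.Tensorflow.Core.Framework.Types.DataType
+argDataType = Lens.Family2.view type'
+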
    +data OpDef'AttrDef = OpDef'AttrDef{_OpDef'AttrDef'name ::
    +                                   Data.Text.Text,
    +                                   _OpDef'AttrDef'type' :: Data.Text.Text,
    +                                   _OpDef'AttrDef'defaultValue ::
    +                                   Prelude.Maybe
    +                                     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
    +                                   _OpDef'AttrDef'description :: Data.Text.Text,
    +                                   _OpDef'AttrDef'hasMinimum :: Prelude.Bool,
    +                                   _OpDef'AttrDef'minimum :: Data.Int.Int64,
    +                                   _OpDef'AttrDef'allowedValues ::
    +                                   Prelude.Maybe
    +                                     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    +                   deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "name" OpDef'AttrDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "name" OpDef'AttrDef OpDef'AttrDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'name
    +              (\ x__ y__ -> x__{_OpDef'AttrDef'name = y__})
    +
    +type instance Data.ProtoLens.Field "type'" OpDef'AttrDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "type'" OpDef'AttrDef
    +         OpDef'AttrDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'type'
    +              (\ x__ y__ -> x__{_OpDef'AttrDef'type' = y__})
    +
    +type instance Data.ProtoLens.Field "defaultValue" OpDef'AttrDef =
    +     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    +
    +instance Data.ProtoLens.HasField "defaultValue" OpDef'AttrDef
    +         OpDef'AttrDef where
    +        field _
    +          = (Prelude..) maybe'defaultValue
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance
    +     Data.ProtoLens.Field "maybe'defaultValue" OpDef'AttrDef =
    +     Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    +
    +instance Data.ProtoLens.HasField "maybe'defaultValue" OpDef'AttrDef
    +         OpDef'AttrDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'defaultValue
    +              (\ x__ y__ -> x__{_OpDef'AttrDef'defaultValue = y__})
    +
    +type instance Data.ProtoLens.Field "description" OpDef'AttrDef =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "description" OpDef'AttrDef
    +         OpDef'AttrDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'description
    +              (\ x__ y__ -> x__{_OpDef'AttrDef'description = y__})
    +
    +type instance Data.ProtoLens.Field "hasMinimum" OpDef'AttrDef =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "hasMinimum" OpDef'AttrDef
    +         OpDef'AttrDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'hasMinimum
    +              (\ x__ y__ -> x__{_OpDef'AttrDef'hasMinimum = y__})
    +
    +type instance Data.ProtoLens.Field "minimum" OpDef'AttrDef =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "minimum" OpDef'AttrDef
    +         OpDef'AttrDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'minimum
    +              (\ x__ y__ -> x__{_OpDef'AttrDef'minimum = y__})
    +
    +type instance Data.ProtoLens.Field "allowedValues" OpDef'AttrDef =
    +     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    +
    +instance Data.ProtoLens.HasField "allowedValues" OpDef'AttrDef
    +         OpDef'AttrDef where
    +        field _
    +          = (Prelude..) maybe'allowedValues
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance
    +     Data.ProtoLens.Field "maybe'allowedValues" OpDef'AttrDef =
    +     Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    +
    +instance Data.ProtoLens.HasField "maybe'allowedValues"
    +         OpDef'AttrDef OpDef'AttrDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'allowedValues
    +              (\ x__ y__ -> x__{_OpDef'AttrDef'allowedValues = y__})
    +
    +instance Data.Default.Class.Default OpDef'AttrDef where
    +        def
    +          = OpDef'AttrDef{_OpDef'AttrDef'name = Data.ProtoLens.fieldDefault,
    +                          _OpDef'AttrDef'type' = Data.ProtoLens.fieldDefault,
    +                          _OpDef'AttrDef'defaultValue = Prelude.Nothing,
    +                          _OpDef'AttrDef'description = Data.ProtoLens.fieldDefault,
    +                          _OpDef'AttrDef'hasMinimum = Data.ProtoLens.fieldDefault,
    +                          _OpDef'AttrDef'minimum = Data.ProtoLens.fieldDefault,
    +                          _OpDef'AttrDef'allowedValues = Prelude.Nothing}
    +
    +instance Data.ProtoLens.Message OpDef'AttrDef where
    +        descriptor
    +          = let name__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    +                type'__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "type"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional type')
    +                defaultValue__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "default_value"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
    +                      (Data.ProtoLens.OptionalField maybe'defaultValue)
    +                description__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "description"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional description)
    +                hasMinimum__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "has_minimum"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional hasMinimum)
    +                minimum__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "minimum"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional minimum)
    +                allowedValues__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "allowed_values"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
    +                      (Data.ProtoLens.OptionalField maybe'allowedValues)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, type'__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, defaultValue__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, description__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, hasMinimum__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, minimum__field_descriptor),
    +                    (Data.ProtoLens.Tag 7, allowedValues__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("name", name__field_descriptor),
    +                    ("type", type'__field_descriptor),
    +                    ("default_value", defaultValue__field_descriptor),
    +                    ("description", description__field_descriptor),
    +                    ("has_minimum", hasMinimum__field_descriptor),
    +                    ("minimum", minimum__field_descriptor),
    +                    ("allowed_values", allowedValues__field_descriptor)])
    +
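+-- Editor's note: a minimal usage sketch, not part of the generated
+-- output.  Assuming the lens-family operators reexported above, an
+-- OpDef'AttrDef is built from its Default instance plus the field
+-- lenses defined near the end of this module; the attribute "N" and its
+-- constraint are made up for illustration.
+--
+--   {-# LANGUAGE OverloadedStrings #-}
+--   import Prelude hiding (minimum)
+--   import Data.Default.Class (def)
+--   import Data.Function ((&))
+--   import Lens.Family2 ((.~))
+--   import Proto.Tensorflow.Core.Framework.OpDef
+--
+--   -- A hypothetical integer attribute "N" constrained to be >= 1.
+--   nAttr :: OpDef'AttrDef
+--   nAttr = def & name .~ "N"
+--               & type' .~ "int"
+--               & hasMinimum .~ True
+--               & minimum .~ 1
+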
    +data OpDeprecation = OpDeprecation{_OpDeprecation'version ::
    +                                   Data.Int.Int32,
    +                                   _OpDeprecation'explanation :: Data.Text.Text}
    +                   deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "version" OpDeprecation =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "version" OpDeprecation
    +         OpDeprecation where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDeprecation'version
    +              (\ x__ y__ -> x__{_OpDeprecation'version = y__})
    +
    +type instance Data.ProtoLens.Field "explanation" OpDeprecation =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "explanation" OpDeprecation
    +         OpDeprecation where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpDeprecation'explanation
    +              (\ x__ y__ -> x__{_OpDeprecation'explanation = y__})
    +
    +instance Data.Default.Class.Default OpDeprecation where
    +        def
    +          = OpDeprecation{_OpDeprecation'version =
    +                            Data.ProtoLens.fieldDefault,
    +                          _OpDeprecation'explanation = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message OpDeprecation where
    +        descriptor
    +          = let version__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "version"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional version)
    +                explanation__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "explanation"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional explanation)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, version__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, explanation__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("version", version__field_descriptor),
    +                    ("explanation", explanation__field_descriptor)])
    +
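+-- Editor's note: a hedged round-trip sketch using encodeMessage and
+-- decodeMessage from Data.ProtoLens; the version number and explanation
+-- text are invented, and the result should be Right dep.
+--
+--   {-# LANGUAGE OverloadedStrings #-}
+--   import Data.Default.Class (def)
+--   import Data.Function ((&))
+--   import Lens.Family2 ((.~))
+--   import Data.ProtoLens (decodeMessage, encodeMessage)
+--   import Proto.Tensorflow.Core.Framework.OpDef
+--
+--   roundTrip :: Either String OpDeprecation
+--   roundTrip = decodeMessage (encodeMessage dep)
+--     where
+--       dep :: OpDeprecation
+--       dep = def & version .~ 9
+--                 & explanation .~ "superseded by a newer op"
+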
    +data OpList = OpList{_OpList'op :: [OpDef]}
    +            deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "op" OpList = [OpDef]
    +
    +instance Data.ProtoLens.HasField "op" OpList OpList where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OpList'op
    +              (\ x__ y__ -> x__{_OpList'op = y__})
    +
    +instance Data.Default.Class.Default OpList where
    +        def = OpList{_OpList'op = []}
    +
    +instance Data.ProtoLens.Message OpList where
    +        descriptor
    +          = let op__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "op"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor OpDef)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked op)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList [(Data.ProtoLens.Tag 1, op__field_descriptor)])
    +                (Data.Map.fromList [("op", op__field_descriptor)])
    +
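+-- Editor's note: TensorFlow publishes its op registry as a serialized
+-- OpList (e.g. via TF_GetAllOpList in the C API).  A sketch, assuming
+-- such a ByteString, that decodes it and extracts every op name through
+-- the repeated "op" lens:
+--
+--   import Data.ByteString (ByteString)
+--   import Data.Text (Text)
+--   import Data.ProtoLens (decodeMessage)
+--   import Lens.Family2 ((^.))
+--   import Proto.Tensorflow.Core.Framework.OpDef
+--
+--   opNames :: ByteString -> Either String [Text]
+--   opNames bs = do
+--     ops <- decodeMessage bs :: Either String OpList
+--     return [o ^. name | o <- ops ^. op]
+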
    +allowedValues ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "allowedValues" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "allowedValues" msg)
    +                  (Data.ProtoLens.Field "allowedValues" msg')
    +allowedValues
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "allowedValues")
    +
    +allowsUninitializedInput ::
    +                         forall msg msg' .
    +                           Data.ProtoLens.HasField "allowsUninitializedInput" msg msg' =>
    +                           Lens.Family2.Lens msg msg'
    +                             (Data.ProtoLens.Field "allowsUninitializedInput" msg)
    +                             (Data.ProtoLens.Field "allowsUninitializedInput" msg')
    +allowsUninitializedInput
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "allowsUninitializedInput")
    +
    +attr ::
    +     forall msg msg' . Data.ProtoLens.HasField "attr" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "attr" msg)
    +         (Data.ProtoLens.Field "attr" msg')
    +attr
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "attr")
    +
    +defaultValue ::
    +             forall msg msg' .
    +               Data.ProtoLens.HasField "defaultValue" msg msg' =>
    +               Lens.Family2.Lens msg msg'
    +                 (Data.ProtoLens.Field "defaultValue" msg)
    +                 (Data.ProtoLens.Field "defaultValue" msg')
    +defaultValue
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "defaultValue")
    +
    +deprecation ::
    +            forall msg msg' . Data.ProtoLens.HasField "deprecation" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "deprecation" msg)
    +                (Data.ProtoLens.Field "deprecation" msg')
    +deprecation
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "deprecation")
    +
    +description ::
    +            forall msg msg' . Data.ProtoLens.HasField "description" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "description" msg)
    +                (Data.ProtoLens.Field "description" msg')
    +description
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "description")
    +
    +explanation ::
    +            forall msg msg' . Data.ProtoLens.HasField "explanation" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "explanation" msg)
    +                (Data.ProtoLens.Field "explanation" msg')
    +explanation
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "explanation")
    +
    +hasMinimum ::
    +           forall msg msg' . Data.ProtoLens.HasField "hasMinimum" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "hasMinimum" msg)
    +               (Data.ProtoLens.Field "hasMinimum" msg')
    +hasMinimum
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "hasMinimum")
    +
    +inputArg ::
    +         forall msg msg' . Data.ProtoLens.HasField "inputArg" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "inputArg" msg)
    +             (Data.ProtoLens.Field "inputArg" msg')
    +inputArg
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "inputArg")
    +
    +isAggregate ::
    +            forall msg msg' . Data.ProtoLens.HasField "isAggregate" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "isAggregate" msg)
    +                (Data.ProtoLens.Field "isAggregate" msg')
    +isAggregate
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "isAggregate")
    +
    +isCommutative ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "isCommutative" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "isCommutative" msg)
    +                  (Data.ProtoLens.Field "isCommutative" msg')
    +isCommutative
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "isCommutative")
    +
    +isRef ::
    +      forall msg msg' . Data.ProtoLens.HasField "isRef" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "isRef" msg)
    +          (Data.ProtoLens.Field "isRef" msg')
    +isRef
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "isRef")
    +
    +isStateful ::
    +           forall msg msg' . Data.ProtoLens.HasField "isStateful" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "isStateful" msg)
    +               (Data.ProtoLens.Field "isStateful" msg')
    +isStateful
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "isStateful")
    +
    +maybe'allowedValues ::
    +                    forall msg msg' .
    +                      Data.ProtoLens.HasField "maybe'allowedValues" msg msg' =>
    +                      Lens.Family2.Lens msg msg'
    +                        (Data.ProtoLens.Field "maybe'allowedValues" msg)
    +                        (Data.ProtoLens.Field "maybe'allowedValues" msg')
    +maybe'allowedValues
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'allowedValues")
    +
    +maybe'defaultValue ::
    +                   forall msg msg' .
    +                     Data.ProtoLens.HasField "maybe'defaultValue" msg msg' =>
    +                     Lens.Family2.Lens msg msg'
    +                       (Data.ProtoLens.Field "maybe'defaultValue" msg)
    +                       (Data.ProtoLens.Field "maybe'defaultValue" msg')
    +maybe'defaultValue
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'defaultValue")
    +
    +maybe'deprecation ::
    +                  forall msg msg' .
    +                    Data.ProtoLens.HasField "maybe'deprecation" msg msg' =>
    +                    Lens.Family2.Lens msg msg'
    +                      (Data.ProtoLens.Field "maybe'deprecation" msg)
    +                      (Data.ProtoLens.Field "maybe'deprecation" msg')
    +maybe'deprecation
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'deprecation")
    +
    +minimum ::
    +        forall msg msg' . Data.ProtoLens.HasField "minimum" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "minimum" msg)
    +            (Data.ProtoLens.Field "minimum" msg')
    +minimum
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "minimum")
    +
    +name ::
    +     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    +         (Data.ProtoLens.Field "name" msg')
    +name
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    +
    +numberAttr ::
    +           forall msg msg' . Data.ProtoLens.HasField "numberAttr" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "numberAttr" msg)
    +               (Data.ProtoLens.Field "numberAttr" msg')
    +numberAttr
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "numberAttr")
    +
    +op ::
    +   forall msg msg' . Data.ProtoLens.HasField "op" msg msg' =>
    +     Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "op" msg)
    +       (Data.ProtoLens.Field "op" msg')
    +op
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "op")
    +
    +outputArg ::
    +          forall msg msg' . Data.ProtoLens.HasField "outputArg" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "outputArg" msg)
    +              (Data.ProtoLens.Field "outputArg" msg')
    +outputArg
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "outputArg")
    +
    +summary ::
    +        forall msg msg' . Data.ProtoLens.HasField "summary" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "summary" msg)
    +            (Data.ProtoLens.Field "summary" msg')
    +summary
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "summary")
    +
    +type' ::
    +      forall msg msg' . Data.ProtoLens.HasField "type'" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "type'" msg)
    +          (Data.ProtoLens.Field "type'" msg')
    +type'
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "type'")
    +
    +typeAttr ::
    +         forall msg msg' . Data.ProtoLens.HasField "typeAttr" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "typeAttr" msg)
    +             (Data.ProtoLens.Field "typeAttr" msg')
    +typeAttr
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "typeAttr")
    +
    +typeListAttr ::
    +             forall msg msg' .
    +               Data.ProtoLens.HasField "typeListAttr" msg msg' =>
    +               Lens.Family2.Lens msg msg'
    +                 (Data.ProtoLens.Field "typeListAttr" msg)
    +                 (Data.ProtoLens.Field "typeListAttr" msg')
    +typeListAttr
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "typeListAttr")
    +
    +version ::
    +        forall msg msg' . Data.ProtoLens.HasField "version" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "version" msg)
    +            (Data.ProtoLens.Field "version" msg')
    +version
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "version")
    +
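+-- Editor's note: the accessors above are polymorphic in the message via
+-- the HasField class, so a single lens such as `name` works on OpDef,
+-- OpDef'AttrDef and OpDef'ArgDef alike.  A sketch (summaryLine and
+-- attrNames are hypothetical helpers):
+--
+--   {-# LANGUAGE OverloadedStrings #-}
+--   import Data.Monoid ((<>))
+--   import Data.Text (Text)
+--   import Lens.Family2 ((^.))
+--   import Proto.Tensorflow.Core.Framework.OpDef
+--
+--   summaryLine :: OpDef -> Text
+--   summaryLine o = o ^. name <> ": " <> o ^. summary
+--
+--   attrNames :: OpDef -> [Text]
+--   attrNames o = map (^. name) (o ^. attr)
+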
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-ResourceHandle.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-ResourceHandle.html
new file mode 100644
index 0000000..8d2cde7
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-ResourceHandle.html
@@ -0,0 +1,182 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/ResourceHandle.hs
+{- This file was auto-generated from tensorflow/core/framework/resource_handle.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.ResourceHandle where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +
    +data ResourceHandle = ResourceHandle{_ResourceHandle'device ::
    +                                     Data.Text.Text,
    +                                     _ResourceHandle'container :: Data.Text.Text,
    +                                     _ResourceHandle'name :: Data.Text.Text,
    +                                     _ResourceHandle'hashCode :: Data.Word.Word64,
    +                                     _ResourceHandle'maybeTypeName :: Data.Text.Text}
    +                    deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "device" ResourceHandle =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "device" ResourceHandle
    +         ResourceHandle where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ResourceHandle'device
    +              (\ x__ y__ -> x__{_ResourceHandle'device = y__})
    +
    +type instance Data.ProtoLens.Field "container" ResourceHandle =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "container" ResourceHandle
    +         ResourceHandle where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ResourceHandle'container
    +              (\ x__ y__ -> x__{_ResourceHandle'container = y__})
    +
    +type instance Data.ProtoLens.Field "name" ResourceHandle =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "name" ResourceHandle
    +         ResourceHandle where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ResourceHandle'name
    +              (\ x__ y__ -> x__{_ResourceHandle'name = y__})
    +
    +type instance Data.ProtoLens.Field "hashCode" ResourceHandle =
    +     Data.Word.Word64
    +
    +instance Data.ProtoLens.HasField "hashCode" ResourceHandle
    +         ResourceHandle where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ResourceHandle'hashCode
    +              (\ x__ y__ -> x__{_ResourceHandle'hashCode = y__})
    +
    +type instance Data.ProtoLens.Field "maybeTypeName" ResourceHandle =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "maybeTypeName" ResourceHandle
    +         ResourceHandle where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ResourceHandle'maybeTypeName
    +              (\ x__ y__ -> x__{_ResourceHandle'maybeTypeName = y__})
    +
    +instance Data.Default.Class.Default ResourceHandle where
    +        def
    +          = ResourceHandle{_ResourceHandle'device =
    +                             Data.ProtoLens.fieldDefault,
    +                           _ResourceHandle'container = Data.ProtoLens.fieldDefault,
    +                           _ResourceHandle'name = Data.ProtoLens.fieldDefault,
    +                           _ResourceHandle'hashCode = Data.ProtoLens.fieldDefault,
    +                           _ResourceHandle'maybeTypeName = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message ResourceHandle where
    +        descriptor
    +          = let device__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "device"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
    +                container__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "container"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional container)
    +                name__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    +                hashCode__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "hash_code"
    +                      (Data.ProtoLens.UInt64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional hashCode)
    +                maybeTypeName__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "maybe_type_name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional maybeTypeName)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, device__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, container__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, name__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, hashCode__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, maybeTypeName__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("device", device__field_descriptor),
    +                    ("container", container__field_descriptor),
    +                    ("name", name__field_descriptor),
    +                    ("hash_code", hashCode__field_descriptor),
    +                    ("maybe_type_name", maybeTypeName__field_descriptor)])
    +
    +container ::
    +          forall msg msg' . Data.ProtoLens.HasField "container" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "container" msg)
    +              (Data.ProtoLens.Field "container" msg')
    +container
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "container")
    +
    +device ::
    +       forall msg msg' . Data.ProtoLens.HasField "device" msg msg' =>
    +         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "device" msg)
    +           (Data.ProtoLens.Field "device" msg')
    +device
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "device")
    +
    +hashCode ::
    +         forall msg msg' . Data.ProtoLens.HasField "hashCode" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "hashCode" msg)
    +             (Data.ProtoLens.Field "hashCode" msg')
    +hashCode
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "hashCode")
    +
    +maybeTypeName ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "maybeTypeName" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "maybeTypeName" msg)
    +                  (Data.ProtoLens.Field "maybeTypeName" msg')
    +maybeTypeName
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybeTypeName")
    +
    +name ::
    +     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    +         (Data.ProtoLens.Field "name" msg')
    +name
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    +
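+-- Editor's note: a minimal construction sketch; the device, container
+-- and name strings are made up.  Fields left unset read back as their
+-- protobuf defaults (e.g. `h ^. hashCode` is 0 here).
+--
+--   {-# LANGUAGE OverloadedStrings #-}
+--   import Data.Default.Class (def)
+--   import Data.Function ((&))
+--   import Lens.Family2 ((.~), (^.))
+--   import Proto.Tensorflow.Core.Framework.ResourceHandle
+--
+--   h :: ResourceHandle
+--   h = def & device .~ "/job:localhost/replica:0/task:0/cpu:0"
+--           & container .~ "localhost"
+--           & name .~ "var0"
+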
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-StepStats.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-StepStats.html
new file mode 100644
index 0000000..5fc4c9a
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-StepStats.html
@@ -0,0 +1,653 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/StepStats.hs
+{- This file was auto-generated from tensorflow/core/framework/step_stats.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.StepStats where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +import qualified
    +       Proto.Tensorflow.Core.Framework.AllocationDescription
    +import qualified Proto.Tensorflow.Core.Framework.TensorDescription
    +
    +data AllocatorMemoryUsed = AllocatorMemoryUsed{_AllocatorMemoryUsed'allocatorName
    +                                               :: Data.Text.Text,
    +                                               _AllocatorMemoryUsed'totalBytes :: Data.Int.Int64,
    +                                               _AllocatorMemoryUsed'peakBytes :: Data.Int.Int64}
    +                         deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance
    +     Data.ProtoLens.Field "allocatorName" AllocatorMemoryUsed =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "allocatorName"
    +         AllocatorMemoryUsed AllocatorMemoryUsed where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AllocatorMemoryUsed'allocatorName
    +              (\ x__ y__ -> x__{_AllocatorMemoryUsed'allocatorName = y__})
    +
    +type instance Data.ProtoLens.Field "totalBytes" AllocatorMemoryUsed
    +     = Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "totalBytes" AllocatorMemoryUsed
    +         AllocatorMemoryUsed where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AllocatorMemoryUsed'totalBytes
    +              (\ x__ y__ -> x__{_AllocatorMemoryUsed'totalBytes = y__})
    +
    +type instance Data.ProtoLens.Field "peakBytes" AllocatorMemoryUsed
    +     = Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "peakBytes" AllocatorMemoryUsed
    +         AllocatorMemoryUsed where
    +        field _
    +          = Lens.Family2.Unchecked.lens _AllocatorMemoryUsed'peakBytes
    +              (\ x__ y__ -> x__{_AllocatorMemoryUsed'peakBytes = y__})
    +
    +instance Data.Default.Class.Default AllocatorMemoryUsed where
    +        def
    +          = AllocatorMemoryUsed{_AllocatorMemoryUsed'allocatorName =
    +                                  Data.ProtoLens.fieldDefault,
    +                                _AllocatorMemoryUsed'totalBytes = Data.ProtoLens.fieldDefault,
    +                                _AllocatorMemoryUsed'peakBytes = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message AllocatorMemoryUsed where
    +        descriptor
    +          = let allocatorName__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "allocator_name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorName)
    +                totalBytes__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "total_bytes"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional totalBytes)
    +                peakBytes__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "peak_bytes"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional peakBytes)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, allocatorName__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, totalBytes__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, peakBytes__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("allocator_name", allocatorName__field_descriptor),
    +                    ("total_bytes", totalBytes__field_descriptor),
    +                    ("peak_bytes", peakBytes__field_descriptor)])
    +
    +data DeviceStepStats = DeviceStepStats{_DeviceStepStats'device ::
    +                                       Data.Text.Text,
    +                                       _DeviceStepStats'nodeStats :: [NodeExecStats]}
    +                     deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "device" DeviceStepStats =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "device" DeviceStepStats
    +         DeviceStepStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _DeviceStepStats'device
    +              (\ x__ y__ -> x__{_DeviceStepStats'device = y__})
    +
    +type instance Data.ProtoLens.Field "nodeStats" DeviceStepStats =
    +     [NodeExecStats]
    +
    +instance Data.ProtoLens.HasField "nodeStats" DeviceStepStats
    +         DeviceStepStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _DeviceStepStats'nodeStats
    +              (\ x__ y__ -> x__{_DeviceStepStats'nodeStats = y__})
    +
    +instance Data.Default.Class.Default DeviceStepStats where
    +        def
    +          = DeviceStepStats{_DeviceStepStats'device =
    +                              Data.ProtoLens.fieldDefault,
    +                            _DeviceStepStats'nodeStats = []}
    +
    +instance Data.ProtoLens.Message DeviceStepStats where
    +        descriptor
    +          = let device__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "device"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
    +                nodeStats__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "node_stats"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor NodeExecStats)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked nodeStats)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, device__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, nodeStats__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("device", device__field_descriptor),
    +                    ("node_stats", nodeStats__field_descriptor)])
    +
    +data NodeExecStats = NodeExecStats{_NodeExecStats'nodeName ::
    +                                   Data.Text.Text,
    +                                   _NodeExecStats'allStartMicros :: Data.Int.Int64,
    +                                   _NodeExecStats'opStartRelMicros :: Data.Int.Int64,
    +                                   _NodeExecStats'opEndRelMicros :: Data.Int.Int64,
    +                                   _NodeExecStats'allEndRelMicros :: Data.Int.Int64,
    +                                   _NodeExecStats'memory :: [AllocatorMemoryUsed],
    +                                   _NodeExecStats'output :: [NodeOutput],
    +                                   _NodeExecStats'timelineLabel :: Data.Text.Text,
    +                                   _NodeExecStats'scheduledMicros :: Data.Int.Int64,
    +                                   _NodeExecStats'threadId :: Data.Word.Word32,
    +                                   _NodeExecStats'referencedTensor ::
    +                                   [Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription]}
    +                   deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "nodeName" NodeExecStats =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "nodeName" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'nodeName
    +              (\ x__ y__ -> x__{_NodeExecStats'nodeName = y__})
    +
    +type instance Data.ProtoLens.Field "allStartMicros" NodeExecStats =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "allStartMicros" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'allStartMicros
    +              (\ x__ y__ -> x__{_NodeExecStats'allStartMicros = y__})
    +
    +type instance Data.ProtoLens.Field "opStartRelMicros" NodeExecStats
    +     = Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "opStartRelMicros" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'opStartRelMicros
    +              (\ x__ y__ -> x__{_NodeExecStats'opStartRelMicros = y__})
    +
    +type instance Data.ProtoLens.Field "opEndRelMicros" NodeExecStats =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "opEndRelMicros" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'opEndRelMicros
    +              (\ x__ y__ -> x__{_NodeExecStats'opEndRelMicros = y__})
    +
    +type instance Data.ProtoLens.Field "allEndRelMicros" NodeExecStats
    +     = Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "allEndRelMicros" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'allEndRelMicros
    +              (\ x__ y__ -> x__{_NodeExecStats'allEndRelMicros = y__})
    +
    +type instance Data.ProtoLens.Field "memory" NodeExecStats =
    +     [AllocatorMemoryUsed]
    +
    +instance Data.ProtoLens.HasField "memory" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'memory
    +              (\ x__ y__ -> x__{_NodeExecStats'memory = y__})
    +
    +type instance Data.ProtoLens.Field "output" NodeExecStats =
    +     [NodeOutput]
    +
    +instance Data.ProtoLens.HasField "output" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'output
    +              (\ x__ y__ -> x__{_NodeExecStats'output = y__})
    +
    +type instance Data.ProtoLens.Field "timelineLabel" NodeExecStats =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "timelineLabel" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'timelineLabel
    +              (\ x__ y__ -> x__{_NodeExecStats'timelineLabel = y__})
    +
    +type instance Data.ProtoLens.Field "scheduledMicros" NodeExecStats
    +     = Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "scheduledMicros" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'scheduledMicros
    +              (\ x__ y__ -> x__{_NodeExecStats'scheduledMicros = y__})
    +
    +type instance Data.ProtoLens.Field "threadId" NodeExecStats =
    +     Data.Word.Word32
    +
    +instance Data.ProtoLens.HasField "threadId" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'threadId
    +              (\ x__ y__ -> x__{_NodeExecStats'threadId = y__})
    +
    +type instance Data.ProtoLens.Field "referencedTensor" NodeExecStats
    +     =
    +     [Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription]
    +
    +instance Data.ProtoLens.HasField "referencedTensor" NodeExecStats
    +         NodeExecStats where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeExecStats'referencedTensor
    +              (\ x__ y__ -> x__{_NodeExecStats'referencedTensor = y__})
    +
    +instance Data.Default.Class.Default NodeExecStats where
    +        def
    +          = NodeExecStats{_NodeExecStats'nodeName =
    +                            Data.ProtoLens.fieldDefault,
    +                          _NodeExecStats'allStartMicros = Data.ProtoLens.fieldDefault,
    +                          _NodeExecStats'opStartRelMicros = Data.ProtoLens.fieldDefault,
    +                          _NodeExecStats'opEndRelMicros = Data.ProtoLens.fieldDefault,
    +                          _NodeExecStats'allEndRelMicros = Data.ProtoLens.fieldDefault,
    +                          _NodeExecStats'memory = [], _NodeExecStats'output = [],
    +                          _NodeExecStats'timelineLabel = Data.ProtoLens.fieldDefault,
    +                          _NodeExecStats'scheduledMicros = Data.ProtoLens.fieldDefault,
    +                          _NodeExecStats'threadId = Data.ProtoLens.fieldDefault,
    +                          _NodeExecStats'referencedTensor = []}
    +
    +instance Data.ProtoLens.Message NodeExecStats where
    +        descriptor
    +          = let nodeName__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "node_name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional nodeName)
    +                allStartMicros__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "all_start_micros"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allStartMicros)
    +                opStartRelMicros__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "op_start_rel_micros"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         opStartRelMicros)
    +                opEndRelMicros__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "op_end_rel_micros"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional opEndRelMicros)
    +                allEndRelMicros__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "all_end_rel_micros"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allEndRelMicros)
    +                memory__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "memory"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor AllocatorMemoryUsed)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked memory)
    +                output__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "output"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor NodeOutput)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked output)
    +                timelineLabel__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "timeline_label"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional timelineLabel)
    +                scheduledMicros__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "scheduled_micros"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional scheduledMicros)
    +                threadId__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "thread_id"
    +                      (Data.ProtoLens.UInt32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional threadId)
    +                referencedTensor__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "referenced_tensor"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    +                         referencedTensor)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, nodeName__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, allStartMicros__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, opStartRelMicros__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, opEndRelMicros__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, allEndRelMicros__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, memory__field_descriptor),
    +                    (Data.ProtoLens.Tag 7, output__field_descriptor),
    +                    (Data.ProtoLens.Tag 8, timelineLabel__field_descriptor),
    +                    (Data.ProtoLens.Tag 9, scheduledMicros__field_descriptor),
    +                    (Data.ProtoLens.Tag 10, threadId__field_descriptor),
    +                    (Data.ProtoLens.Tag 11, referencedTensor__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("node_name", nodeName__field_descriptor),
    +                    ("all_start_micros", allStartMicros__field_descriptor),
    +                    ("op_start_rel_micros", opStartRelMicros__field_descriptor),
    +                    ("op_end_rel_micros", opEndRelMicros__field_descriptor),
    +                    ("all_end_rel_micros", allEndRelMicros__field_descriptor),
    +                    ("memory", memory__field_descriptor),
    +                    ("output", output__field_descriptor),
    +                    ("timeline_label", timelineLabel__field_descriptor),
    +                    ("scheduled_micros", scheduledMicros__field_descriptor),
    +                    ("thread_id", threadId__field_descriptor),
    +                    ("referenced_tensor", referencedTensor__field_descriptor)])
    +
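+-- Editor's note: by their naming, the *RelMicros fields are offsets
+-- relative to allStartMicros, so the time spent inside the op kernel is
+-- the difference of the two op markers.  A sketch:
+--
+--   import Data.Int (Int64)
+--   import Lens.Family2 ((^.))
+--   import Proto.Tensorflow.Core.Framework.StepStats
+--
+--   opDurationMicros :: NodeExecStats -> Int64
+--   opDurationMicros n = n ^. opEndRelMicros - n ^. opStartRelMicros
+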
    +data NodeOutput = NodeOutput{_NodeOutput'slot :: Data.Int.Int32,
    +                             _NodeOutput'tensorDescription ::
    +                             Prelude.Maybe
    +                               Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription}
    +                deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "slot" NodeOutput =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "slot" NodeOutput NodeOutput where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeOutput'slot
    +              (\ x__ y__ -> x__{_NodeOutput'slot = y__})
    +
    +type instance Data.ProtoLens.Field "tensorDescription" NodeOutput =
    +     Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription
    +
    +instance Data.ProtoLens.HasField "tensorDescription" NodeOutput
    +         NodeOutput where
    +        field _
    +          = (Prelude..) maybe'tensorDescription
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance
    +     Data.ProtoLens.Field "maybe'tensorDescription" NodeOutput =
    +     Prelude.Maybe
    +       Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription
    +
    +instance Data.ProtoLens.HasField "maybe'tensorDescription"
    +         NodeOutput NodeOutput where
    +        field _
    +          = Lens.Family2.Unchecked.lens _NodeOutput'tensorDescription
    +              (\ x__ y__ -> x__{_NodeOutput'tensorDescription = y__})
    +
    +instance Data.Default.Class.Default NodeOutput where
    +        def
    +          = NodeOutput{_NodeOutput'slot = Data.ProtoLens.fieldDefault,
    +                       _NodeOutput'tensorDescription = Prelude.Nothing}
    +
    +instance Data.ProtoLens.Message NodeOutput where
    +        descriptor
    +          = let slot__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "slot"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional slot)
    +                tensorDescription__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "tensor_description"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription)
    +                      (Data.ProtoLens.OptionalField maybe'tensorDescription)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, slot__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, tensorDescription__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("slot", slot__field_descriptor),
    +                    ("tensor_description", tensorDescription__field_descriptor)])
    +
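+-- Editor's note: as generated above, the optional message field gets two
+-- lenses: tensorDescription substitutes def for a missing value (via
+-- Data.ProtoLens.maybeLens), while maybe'tensorDescription exposes the
+-- underlying Maybe.  A sketch telling the two apart:
+--
+--   import Data.Maybe (isJust)
+--   import Lens.Family2 ((^.))
+--   import Proto.Tensorflow.Core.Framework.StepStats
+--
+--   hasDescription :: NodeOutput -> Bool
+--   hasDescription out = isJust (out ^. maybe'tensorDescription)
+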
    +data StepStats = StepStats{_StepStats'devStats ::
    +                           [DeviceStepStats]}
    +               deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "devStats" StepStats =
    +     [DeviceStepStats]
    +
    +instance Data.ProtoLens.HasField "devStats" StepStats StepStats
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _StepStats'devStats
    +              (\ x__ y__ -> x__{_StepStats'devStats = y__})
    +
    +instance Data.Default.Class.Default StepStats where
    +        def = StepStats{_StepStats'devStats = []}
    +
    +instance Data.ProtoLens.Message StepStats where
    +        descriptor
    +          = let devStats__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "dev_stats"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor DeviceStepStats)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked devStats)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, devStats__field_descriptor)])
    +                (Data.Map.fromList [("dev_stats", devStats__field_descriptor)])
    +
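+-- Editor's note: a traversal sketch over the whole StepStats tree using
+-- the repeated-field lenses, summing total_bytes across every device,
+-- node and allocator:
+--
+--   import Data.Int (Int64)
+--   import Lens.Family2 ((^.))
+--   import Proto.Tensorflow.Core.Framework.StepStats
+--
+--   totalBytesUsed :: StepStats -> Int64
+--   totalBytesUsed ss =
+--     sum [ m ^. totalBytes
+--         | d <- ss ^. devStats
+--         , n <- d ^. nodeStats
+--         , m <- n ^. memory ]
+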
    +allEndRelMicros ::
    +                forall msg msg' .
    +                  Data.ProtoLens.HasField "allEndRelMicros" msg msg' =>
    +                  Lens.Family2.Lens msg msg'
    +                    (Data.ProtoLens.Field "allEndRelMicros" msg)
    +                    (Data.ProtoLens.Field "allEndRelMicros" msg')
    +allEndRelMicros
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "allEndRelMicros")
    +
    +allStartMicros ::
    +               forall msg msg' .
    +                 Data.ProtoLens.HasField "allStartMicros" msg msg' =>
    +                 Lens.Family2.Lens msg msg'
    +                   (Data.ProtoLens.Field "allStartMicros" msg)
    +                   (Data.ProtoLens.Field "allStartMicros" msg')
    +allStartMicros
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "allStartMicros")
    +
    +allocatorName ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "allocatorName" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "allocatorName" msg)
    +                  (Data.ProtoLens.Field "allocatorName" msg')
    +allocatorName
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "allocatorName")
    +
    +devStats ::
    +         forall msg msg' . Data.ProtoLens.HasField "devStats" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "devStats" msg)
    +             (Data.ProtoLens.Field "devStats" msg')
    +devStats
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "devStats")
    +
    +device ::
    +       forall msg msg' . Data.ProtoLens.HasField "device" msg msg' =>
    +         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "device" msg)
    +           (Data.ProtoLens.Field "device" msg')
    +device
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "device")
    +
    +maybe'tensorDescription ::
    +                        forall msg msg' .
    +                          Data.ProtoLens.HasField "maybe'tensorDescription" msg msg' =>
    +                          Lens.Family2.Lens msg msg'
    +                            (Data.ProtoLens.Field "maybe'tensorDescription" msg)
    +                            (Data.ProtoLens.Field "maybe'tensorDescription" msg')
    +maybe'tensorDescription
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'tensorDescription")
    +
    +memory ::
    +       forall msg msg' . Data.ProtoLens.HasField "memory" msg msg' =>
    +         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "memory" msg)
    +           (Data.ProtoLens.Field "memory" msg')
    +memory
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "memory")
    +
    +nodeName ::
    +         forall msg msg' . Data.ProtoLens.HasField "nodeName" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "nodeName" msg)
    +             (Data.ProtoLens.Field "nodeName" msg')
    +nodeName
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "nodeName")
    +
    +nodeStats ::
    +          forall msg msg' . Data.ProtoLens.HasField "nodeStats" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "nodeStats" msg)
    +              (Data.ProtoLens.Field "nodeStats" msg')
    +nodeStats
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "nodeStats")
    +
    +opEndRelMicros ::
    +               forall msg msg' .
    +                 Data.ProtoLens.HasField "opEndRelMicros" msg msg' =>
    +                 Lens.Family2.Lens msg msg'
    +                   (Data.ProtoLens.Field "opEndRelMicros" msg)
    +                   (Data.ProtoLens.Field "opEndRelMicros" msg')
    +opEndRelMicros
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "opEndRelMicros")
    +
    +opStartRelMicros ::
    +                 forall msg msg' .
    +                   Data.ProtoLens.HasField "opStartRelMicros" msg msg' =>
    +                   Lens.Family2.Lens msg msg'
    +                     (Data.ProtoLens.Field "opStartRelMicros" msg)
    +                     (Data.ProtoLens.Field "opStartRelMicros" msg')
    +opStartRelMicros
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "opStartRelMicros")
    +
    +output ::
    +       forall msg msg' . Data.ProtoLens.HasField "output" msg msg' =>
    +         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "output" msg)
    +           (Data.ProtoLens.Field "output" msg')
    +output
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "output")
    +
    +peakBytes ::
    +          forall msg msg' . Data.ProtoLens.HasField "peakBytes" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "peakBytes" msg)
    +              (Data.ProtoLens.Field "peakBytes" msg')
    +peakBytes
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "peakBytes")
    +
    +referencedTensor ::
    +                 forall msg msg' .
    +                   Data.ProtoLens.HasField "referencedTensor" msg msg' =>
    +                   Lens.Family2.Lens msg msg'
    +                     (Data.ProtoLens.Field "referencedTensor" msg)
    +                     (Data.ProtoLens.Field "referencedTensor" msg')
    +referencedTensor
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "referencedTensor")
    +
    +scheduledMicros ::
    +                forall msg msg' .
    +                  Data.ProtoLens.HasField "scheduledMicros" msg msg' =>
    +                  Lens.Family2.Lens msg msg'
    +                    (Data.ProtoLens.Field "scheduledMicros" msg)
    +                    (Data.ProtoLens.Field "scheduledMicros" msg')
    +scheduledMicros
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "scheduledMicros")
    +
    +slot ::
    +     forall msg msg' . Data.ProtoLens.HasField "slot" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "slot" msg)
    +         (Data.ProtoLens.Field "slot" msg')
    +slot
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "slot")
    +
    +tensorDescription ::
    +                  forall msg msg' .
    +                    Data.ProtoLens.HasField "tensorDescription" msg msg' =>
    +                    Lens.Family2.Lens msg msg'
    +                      (Data.ProtoLens.Field "tensorDescription" msg)
    +                      (Data.ProtoLens.Field "tensorDescription" msg')
    +tensorDescription
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "tensorDescription")
    +
    +threadId ::
    +         forall msg msg' . Data.ProtoLens.HasField "threadId" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "threadId" msg)
    +             (Data.ProtoLens.Field "threadId" msg')
    +threadId
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "threadId")
    +
    +timelineLabel ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "timelineLabel" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "timelineLabel" msg)
    +                  (Data.ProtoLens.Field "timelineLabel" msg')
    +timelineLabel
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "timelineLabel")
    +
    +totalBytes ::
    +           forall msg msg' . Data.ProtoLens.HasField "totalBytes" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "totalBytes" msg)
    +               (Data.ProtoLens.Field "totalBytes" msg')
    +totalBytes
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "totalBytes")
    +
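(Editorial aside, not part of the generated patch: a minimal sketch of how the
generated lenses above compose, assuming this module is
Proto.Tensorflow.Core.Framework.StepStats and that DeviceStepStats's device
field is Data.Text.Text, as in step_stats.proto.)

    import Data.Default.Class (def)
    import Data.Function ((&))
    import qualified Data.Text as Text
    import Lens.Family2 ((^.), (.~))
    import Proto.Tensorflow.Core.Framework.StepStats

    -- Build a StepStats with one device entry, purely through the lenses.
    stats :: StepStats
    stats = def & devStats .~ [def & device .~ Text.pack "/cpu:0"]

    -- Read back through the same lenses.
    firstDevice :: Maybe Text.Text
    firstDevice = case stats ^. devStats of
      []      -> Nothing
      (d : _) -> Just (d ^. device)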
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Tensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Tensor.html
new file mode 100644
index 0000000..5d54c36
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Tensor.html
@@ -0,0 +1,448 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/Tensor.hs
+
+{- This file was auto-generated from tensorflow/core/framework/tensor.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.Tensor where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +import qualified Proto.Tensorflow.Core.Framework.ResourceHandle
    +import qualified Proto.Tensorflow.Core.Framework.TensorShape
    +import qualified Proto.Tensorflow.Core.Framework.Types
    +
    +data TensorProto = TensorProto{_TensorProto'dtype ::
    +                               Proto.Tensorflow.Core.Framework.Types.DataType,
    +                               _TensorProto'tensorShape ::
    +                               Prelude.Maybe
    +                                 Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
    +                               _TensorProto'versionNumber :: Data.Int.Int32,
    +                               _TensorProto'tensorContent :: Data.ByteString.ByteString,
    +                               _TensorProto'halfVal :: [Data.Int.Int32],
    +                               _TensorProto'floatVal :: [Prelude.Float],
    +                               _TensorProto'doubleVal :: [Prelude.Double],
    +                               _TensorProto'intVal :: [Data.Int.Int32],
    +                               _TensorProto'stringVal :: [Data.ByteString.ByteString],
    +                               _TensorProto'scomplexVal :: [Prelude.Float],
    +                               _TensorProto'int64Val :: [Data.Int.Int64],
    +                               _TensorProto'boolVal :: [Prelude.Bool],
    +                               _TensorProto'dcomplexVal :: [Prelude.Double],
    +                               _TensorProto'resourceHandleVal ::
    +                               [Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle]}
    +                 deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "dtype" TensorProto =
    +     Proto.Tensorflow.Core.Framework.Types.DataType
    +
    +instance Data.ProtoLens.HasField "dtype" TensorProto TensorProto
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'dtype
    +              (\ x__ y__ -> x__{_TensorProto'dtype = y__})
    +
    +type instance Data.ProtoLens.Field "tensorShape" TensorProto =
    +     Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    +
    +instance Data.ProtoLens.HasField "tensorShape" TensorProto
    +         TensorProto where
    +        field _
    +          = (Prelude..) maybe'tensorShape
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'tensorShape" TensorProto
    +     =
    +     Prelude.Maybe
    +       Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    +
    +instance Data.ProtoLens.HasField "maybe'tensorShape" TensorProto
    +         TensorProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'tensorShape
    +              (\ x__ y__ -> x__{_TensorProto'tensorShape = y__})
    +
    +type instance Data.ProtoLens.Field "versionNumber" TensorProto =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "versionNumber" TensorProto
    +         TensorProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'versionNumber
    +              (\ x__ y__ -> x__{_TensorProto'versionNumber = y__})
    +
    +type instance Data.ProtoLens.Field "tensorContent" TensorProto =
    +     Data.ByteString.ByteString
    +
    +instance Data.ProtoLens.HasField "tensorContent" TensorProto
    +         TensorProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'tensorContent
    +              (\ x__ y__ -> x__{_TensorProto'tensorContent = y__})
    +
    +type instance Data.ProtoLens.Field "halfVal" TensorProto =
    +     [Data.Int.Int32]
    +
    +instance Data.ProtoLens.HasField "halfVal" TensorProto TensorProto
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'halfVal
    +              (\ x__ y__ -> x__{_TensorProto'halfVal = y__})
    +
    +type instance Data.ProtoLens.Field "floatVal" TensorProto =
    +     [Prelude.Float]
    +
    +instance Data.ProtoLens.HasField "floatVal" TensorProto TensorProto
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'floatVal
    +              (\ x__ y__ -> x__{_TensorProto'floatVal = y__})
    +
    +type instance Data.ProtoLens.Field "doubleVal" TensorProto =
    +     [Prelude.Double]
    +
    +instance Data.ProtoLens.HasField "doubleVal" TensorProto
    +         TensorProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'doubleVal
    +              (\ x__ y__ -> x__{_TensorProto'doubleVal = y__})
    +
    +type instance Data.ProtoLens.Field "intVal" TensorProto =
    +     [Data.Int.Int32]
    +
    +instance Data.ProtoLens.HasField "intVal" TensorProto TensorProto
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'intVal
    +              (\ x__ y__ -> x__{_TensorProto'intVal = y__})
    +
    +type instance Data.ProtoLens.Field "stringVal" TensorProto =
    +     [Data.ByteString.ByteString]
    +
    +instance Data.ProtoLens.HasField "stringVal" TensorProto
    +         TensorProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'stringVal
    +              (\ x__ y__ -> x__{_TensorProto'stringVal = y__})
    +
    +type instance Data.ProtoLens.Field "scomplexVal" TensorProto =
    +     [Prelude.Float]
    +
    +instance Data.ProtoLens.HasField "scomplexVal" TensorProto
    +         TensorProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'scomplexVal
    +              (\ x__ y__ -> x__{_TensorProto'scomplexVal = y__})
    +
    +type instance Data.ProtoLens.Field "int64Val" TensorProto =
    +     [Data.Int.Int64]
    +
    +instance Data.ProtoLens.HasField "int64Val" TensorProto TensorProto
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'int64Val
    +              (\ x__ y__ -> x__{_TensorProto'int64Val = y__})
    +
    +type instance Data.ProtoLens.Field "boolVal" TensorProto =
    +     [Prelude.Bool]
    +
    +instance Data.ProtoLens.HasField "boolVal" TensorProto TensorProto
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'boolVal
    +              (\ x__ y__ -> x__{_TensorProto'boolVal = y__})
    +
    +type instance Data.ProtoLens.Field "dcomplexVal" TensorProto =
    +     [Prelude.Double]
    +
    +instance Data.ProtoLens.HasField "dcomplexVal" TensorProto
    +         TensorProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'dcomplexVal
    +              (\ x__ y__ -> x__{_TensorProto'dcomplexVal = y__})
    +
    +type instance Data.ProtoLens.Field "resourceHandleVal" TensorProto
    +     = [Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle]
    +
    +instance Data.ProtoLens.HasField "resourceHandleVal" TensorProto
    +         TensorProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorProto'resourceHandleVal
    +              (\ x__ y__ -> x__{_TensorProto'resourceHandleVal = y__})
    +
    +instance Data.Default.Class.Default TensorProto where
    +        def
    +          = TensorProto{_TensorProto'dtype = Data.Default.Class.def,
    +                        _TensorProto'tensorShape = Prelude.Nothing,
    +                        _TensorProto'versionNumber = Data.ProtoLens.fieldDefault,
    +                        _TensorProto'tensorContent = Data.ProtoLens.fieldDefault,
    +                        _TensorProto'halfVal = [], _TensorProto'floatVal = [],
    +                        _TensorProto'doubleVal = [], _TensorProto'intVal = [],
    +                        _TensorProto'stringVal = [], _TensorProto'scomplexVal = [],
    +                        _TensorProto'int64Val = [], _TensorProto'boolVal = [],
    +                        _TensorProto'dcomplexVal = [], _TensorProto'resourceHandleVal = []}
    +
    +instance Data.ProtoLens.Message TensorProto where
    +        descriptor
    +          = let dtype__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "dtype"
    +                      (Data.ProtoLens.EnumField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Types.DataType)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
    +                tensorShape__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "tensor_shape"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
    +                      (Data.ProtoLens.OptionalField maybe'tensorShape)
    +                versionNumber__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "version_number"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional versionNumber)
    +                tensorContent__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "tensor_content"
    +                      (Data.ProtoLens.BytesField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional tensorContent)
    +                halfVal__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "half_val"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed halfVal)
    +                floatVal__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "float_val"
    +                      (Data.ProtoLens.FloatField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed floatVal)
    +                doubleVal__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "double_val"
    +                      (Data.ProtoLens.DoubleField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed doubleVal)
    +                intVal__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "int_val"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed intVal)
    +                stringVal__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "string_val"
    +                      (Data.ProtoLens.BytesField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked stringVal)
    +                scomplexVal__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "scomplex_val"
    +                      (Data.ProtoLens.FloatField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed scomplexVal)
    +                int64Val__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "int64_val"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed int64Val)
    +                boolVal__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "bool_val"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed boolVal)
    +                dcomplexVal__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "dcomplex_val"
    +                      (Data.ProtoLens.DoubleField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed dcomplexVal)
    +                resourceHandleVal__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "resource_handle_val"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    +                         resourceHandleVal)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, dtype__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, tensorShape__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, versionNumber__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, tensorContent__field_descriptor),
    +                    (Data.ProtoLens.Tag 13, halfVal__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, floatVal__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, doubleVal__field_descriptor),
    +                    (Data.ProtoLens.Tag 7, intVal__field_descriptor),
    +                    (Data.ProtoLens.Tag 8, stringVal__field_descriptor),
    +                    (Data.ProtoLens.Tag 9, scomplexVal__field_descriptor),
    +                    (Data.ProtoLens.Tag 10, int64Val__field_descriptor),
    +                    (Data.ProtoLens.Tag 11, boolVal__field_descriptor),
    +                    (Data.ProtoLens.Tag 12, dcomplexVal__field_descriptor),
    +                    (Data.ProtoLens.Tag 14, resourceHandleVal__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("dtype", dtype__field_descriptor),
    +                    ("tensor_shape", tensorShape__field_descriptor),
    +                    ("version_number", versionNumber__field_descriptor),
    +                    ("tensor_content", tensorContent__field_descriptor),
    +                    ("half_val", halfVal__field_descriptor),
    +                    ("float_val", floatVal__field_descriptor),
    +                    ("double_val", doubleVal__field_descriptor),
    +                    ("int_val", intVal__field_descriptor),
    +                    ("string_val", stringVal__field_descriptor),
    +                    ("scomplex_val", scomplexVal__field_descriptor),
    +                    ("int64_val", int64Val__field_descriptor),
    +                    ("bool_val", boolVal__field_descriptor),
    +                    ("dcomplex_val", dcomplexVal__field_descriptor),
    +                    ("resource_handle_val", resourceHandleVal__field_descriptor)])
    +
    +boolVal ::
    +        forall msg msg' . Data.ProtoLens.HasField "boolVal" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "boolVal" msg)
    +            (Data.ProtoLens.Field "boolVal" msg')
    +boolVal
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "boolVal")
    +
    +dcomplexVal ::
    +            forall msg msg' . Data.ProtoLens.HasField "dcomplexVal" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dcomplexVal" msg)
    +                (Data.ProtoLens.Field "dcomplexVal" msg')
    +dcomplexVal
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dcomplexVal")
    +
    +doubleVal ::
    +          forall msg msg' . Data.ProtoLens.HasField "doubleVal" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "doubleVal" msg)
    +              (Data.ProtoLens.Field "doubleVal" msg')
    +doubleVal
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "doubleVal")
    +
    +dtype ::
    +      forall msg msg' . Data.ProtoLens.HasField "dtype" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dtype" msg)
    +          (Data.ProtoLens.Field "dtype" msg')
    +dtype
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dtype")
    +
    +floatVal ::
    +         forall msg msg' . Data.ProtoLens.HasField "floatVal" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "floatVal" msg)
    +             (Data.ProtoLens.Field "floatVal" msg')
    +floatVal
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "floatVal")
    +
    +halfVal ::
    +        forall msg msg' . Data.ProtoLens.HasField "halfVal" msg msg' =>
    +          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "halfVal" msg)
    +            (Data.ProtoLens.Field "halfVal" msg')
    +halfVal
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "halfVal")
    +
    +int64Val ::
    +         forall msg msg' . Data.ProtoLens.HasField "int64Val" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "int64Val" msg)
    +             (Data.ProtoLens.Field "int64Val" msg')
    +int64Val
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "int64Val")
    +
    +intVal ::
    +       forall msg msg' . Data.ProtoLens.HasField "intVal" msg msg' =>
    +         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "intVal" msg)
    +           (Data.ProtoLens.Field "intVal" msg')
    +intVal
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "intVal")
    +
    +maybe'tensorShape ::
    +                  forall msg msg' .
    +                    Data.ProtoLens.HasField "maybe'tensorShape" msg msg' =>
    +                    Lens.Family2.Lens msg msg'
    +                      (Data.ProtoLens.Field "maybe'tensorShape" msg)
    +                      (Data.ProtoLens.Field "maybe'tensorShape" msg')
    +maybe'tensorShape
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'tensorShape")
    +
    +resourceHandleVal ::
    +                  forall msg msg' .
    +                    Data.ProtoLens.HasField "resourceHandleVal" msg msg' =>
    +                    Lens.Family2.Lens msg msg'
    +                      (Data.ProtoLens.Field "resourceHandleVal" msg)
    +                      (Data.ProtoLens.Field "resourceHandleVal" msg')
    +resourceHandleVal
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "resourceHandleVal")
    +
    +scomplexVal ::
    +            forall msg msg' . Data.ProtoLens.HasField "scomplexVal" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "scomplexVal" msg)
    +                (Data.ProtoLens.Field "scomplexVal" msg')
    +scomplexVal
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "scomplexVal")
    +
    +stringVal ::
    +          forall msg msg' . Data.ProtoLens.HasField "stringVal" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "stringVal" msg)
    +              (Data.ProtoLens.Field "stringVal" msg')
    +stringVal
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "stringVal")
    +
    +tensorContent ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "tensorContent" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "tensorContent" msg)
    +                  (Data.ProtoLens.Field "tensorContent" msg')
    +tensorContent
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "tensorContent")
    +
    +tensorShape ::
    +            forall msg msg' . Data.ProtoLens.HasField "tensorShape" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "tensorShape" msg)
    +                (Data.ProtoLens.Field "tensorShape" msg')
    +tensorShape
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "tensorShape")
    +
    +versionNumber ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "versionNumber" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "versionNumber" msg)
    +                  (Data.ProtoLens.Field "versionNumber" msg')
    +versionNumber
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "versionNumber")
    +
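(Editorial aside, not part of the generated patch: a sketch of constructing a
TensorProto via these lenses; DT_FLOAT comes from the Types module that
appears later in this patch.)

    import Data.Default.Class (def)
    import Data.Function ((&))
    import Lens.Family2 ((^.), (.~))
    import Proto.Tensorflow.Core.Framework.Tensor
    import Proto.Tensorflow.Core.Framework.Types (DataType (..))

    -- A scalar float tensor: dtype plus a single float_val entry.
    scalar :: TensorProto
    scalar = def & dtype .~ DT_FLOAT
                 & floatVal .~ [42]

    -- scalar ^. floatVal          == [42.0]
    -- scalar ^. maybe'tensorShape == Nothing   (submessage left unset)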
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorDescription.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorDescription.html
new file mode 100644
index 0000000..d9bcc9b
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorDescription.html
@@ -0,0 +1,187 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/TensorDescription.hs
+
+{- This file was auto-generated from tensorflow/core/framework/tensor_description.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.TensorDescription where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +import qualified
    +       Proto.Tensorflow.Core.Framework.AllocationDescription
    +import qualified Proto.Tensorflow.Core.Framework.TensorShape
    +import qualified Proto.Tensorflow.Core.Framework.Types
    +
    +data TensorDescription = TensorDescription{_TensorDescription'dtype
    +                                           :: Proto.Tensorflow.Core.Framework.Types.DataType,
    +                                           _TensorDescription'shape ::
    +                                           Prelude.Maybe
    +                                             Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
    +                                           _TensorDescription'allocationDescription ::
    +                                           Prelude.Maybe
    +                                             Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription}
    +                       deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "dtype" TensorDescription =
    +     Proto.Tensorflow.Core.Framework.Types.DataType
    +
    +instance Data.ProtoLens.HasField "dtype" TensorDescription
    +         TensorDescription where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorDescription'dtype
    +              (\ x__ y__ -> x__{_TensorDescription'dtype = y__})
    +
    +type instance Data.ProtoLens.Field "shape" TensorDescription =
    +     Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    +
    +instance Data.ProtoLens.HasField "shape" TensorDescription
    +         TensorDescription where
    +        field _
    +          = (Prelude..) maybe'shape
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'shape" TensorDescription
    +     =
    +     Prelude.Maybe
    +       Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    +
    +instance Data.ProtoLens.HasField "maybe'shape" TensorDescription
    +         TensorDescription where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorDescription'shape
    +              (\ x__ y__ -> x__{_TensorDescription'shape = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "allocationDescription" TensorDescription =
    +     Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription
    +
    +instance Data.ProtoLens.HasField "allocationDescription"
    +         TensorDescription TensorDescription where
    +        field _
    +          = (Prelude..) maybe'allocationDescription
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance
    +     Data.ProtoLens.Field "maybe'allocationDescription"
    +       TensorDescription
    +     =
    +     Prelude.Maybe
    +       Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription
    +
    +instance Data.ProtoLens.HasField "maybe'allocationDescription"
    +         TensorDescription TensorDescription where
    +        field _
    +          = Lens.Family2.Unchecked.lens
    +              _TensorDescription'allocationDescription
    +              (\ x__ y__ -> x__{_TensorDescription'allocationDescription = y__})
    +
    +instance Data.Default.Class.Default TensorDescription where
    +        def
    +          = TensorDescription{_TensorDescription'dtype =
    +                                Data.Default.Class.def,
    +                              _TensorDescription'shape = Prelude.Nothing,
    +                              _TensorDescription'allocationDescription = Prelude.Nothing}
    +
    +instance Data.ProtoLens.Message TensorDescription where
    +        descriptor
    +          = let dtype__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "dtype"
    +                      (Data.ProtoLens.EnumField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Types.DataType)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
    +                shape__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "shape"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
    +                      (Data.ProtoLens.OptionalField maybe'shape)
    +                allocationDescription__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "allocation_description"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription)
    +                      (Data.ProtoLens.OptionalField maybe'allocationDescription)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, dtype__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, shape__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, allocationDescription__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("dtype", dtype__field_descriptor),
    +                    ("shape", shape__field_descriptor),
    +                    ("allocation_description",
    +                     allocationDescription__field_descriptor)])
    +
    +allocationDescription ::
    +                      forall msg msg' .
    +                        Data.ProtoLens.HasField "allocationDescription" msg msg' =>
    +                        Lens.Family2.Lens msg msg'
    +                          (Data.ProtoLens.Field "allocationDescription" msg)
    +                          (Data.ProtoLens.Field "allocationDescription" msg')
    +allocationDescription
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "allocationDescription")
    +
    +dtype ::
    +      forall msg msg' . Data.ProtoLens.HasField "dtype" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dtype" msg)
    +          (Data.ProtoLens.Field "dtype" msg')
    +dtype
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dtype")
    +
    +maybe'allocationDescription ::
    +                            forall msg msg' .
    +                              Data.ProtoLens.HasField "maybe'allocationDescription" msg msg' =>
    +                              Lens.Family2.Lens msg msg'
    +                                (Data.ProtoLens.Field "maybe'allocationDescription" msg)
    +                                (Data.ProtoLens.Field "maybe'allocationDescription" msg')
    +maybe'allocationDescription
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'allocationDescription")
    +
    +maybe'shape ::
    +            forall msg msg' . Data.ProtoLens.HasField "maybe'shape" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'shape" msg)
    +                (Data.ProtoLens.Field "maybe'shape" msg')
    +maybe'shape
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'shape")
    +
    +shape ::
    +      forall msg msg' . Data.ProtoLens.HasField "shape" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "shape" msg)
    +          (Data.ProtoLens.Field "shape" msg')
    +shape
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "shape")
    +
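(Editorial aside, not part of the generated patch: the shape field shows the
two lens flavours proto-lens emits for an optional submessage: shape reads
through a default value, while maybe'shape exposes presence.)

    import Data.Default.Class (def)
    import Data.Function ((&))
    import Lens.Family2 ((^.), (.~))
    import Proto.Tensorflow.Core.Framework.TensorDescription

    descr :: TensorDescription
    descr = def

    -- descr ^. maybe'shape == Nothing    (field genuinely unset)
    -- descr ^. shape       == def        (defaulting view)
    -- Setting through shape marks the field present:
    -- (descr & shape .~ def) ^. maybe'shape == Just def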
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorShape.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorShape.html
new file mode 100644
index 0000000..805a823
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorShape.html
@@ -0,0 +1,166 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/TensorShape.hs
+
+{- This file was auto-generated from tensorflow/core/framework/tensor_shape.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.TensorShape where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +
    +data TensorShapeProto = TensorShapeProto{_TensorShapeProto'dim ::
    +                                         [TensorShapeProto'Dim],
    +                                         _TensorShapeProto'unknownRank :: Prelude.Bool}
    +                      deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "dim" TensorShapeProto =
    +     [TensorShapeProto'Dim]
    +
    +instance Data.ProtoLens.HasField "dim" TensorShapeProto
    +         TensorShapeProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorShapeProto'dim
    +              (\ x__ y__ -> x__{_TensorShapeProto'dim = y__})
    +
    +type instance Data.ProtoLens.Field "unknownRank" TensorShapeProto =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "unknownRank" TensorShapeProto
    +         TensorShapeProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorShapeProto'unknownRank
    +              (\ x__ y__ -> x__{_TensorShapeProto'unknownRank = y__})
    +
    +instance Data.Default.Class.Default TensorShapeProto where
    +        def
    +          = TensorShapeProto{_TensorShapeProto'dim = [],
    +                             _TensorShapeProto'unknownRank = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message TensorShapeProto where
    +        descriptor
    +          = let dim__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "dim"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor TensorShapeProto'Dim)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked dim)
    +                unknownRank__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "unknown_rank"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional unknownRank)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 2, dim__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, unknownRank__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("dim", dim__field_descriptor),
    +                    ("unknown_rank", unknownRank__field_descriptor)])
    +
    +data TensorShapeProto'Dim = TensorShapeProto'Dim{_TensorShapeProto'Dim'size
    +                                                 :: Data.Int.Int64,
    +                                                 _TensorShapeProto'Dim'name :: Data.Text.Text}
    +                          deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "size" TensorShapeProto'Dim =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "size" TensorShapeProto'Dim
    +         TensorShapeProto'Dim where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorShapeProto'Dim'size
    +              (\ x__ y__ -> x__{_TensorShapeProto'Dim'size = y__})
    +
    +type instance Data.ProtoLens.Field "name" TensorShapeProto'Dim =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "name" TensorShapeProto'Dim
    +         TensorShapeProto'Dim where
    +        field _
    +          = Lens.Family2.Unchecked.lens _TensorShapeProto'Dim'name
    +              (\ x__ y__ -> x__{_TensorShapeProto'Dim'name = y__})
    +
    +instance Data.Default.Class.Default TensorShapeProto'Dim where
    +        def
    +          = TensorShapeProto'Dim{_TensorShapeProto'Dim'size =
    +                                   Data.ProtoLens.fieldDefault,
    +                                 _TensorShapeProto'Dim'name = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message TensorShapeProto'Dim where
    +        descriptor
    +          = let size__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "size"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional size)
    +                name__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, size__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, name__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("size", size__field_descriptor),
    +                    ("name", name__field_descriptor)])
    +
    +dim ::
    +    forall msg msg' . Data.ProtoLens.HasField "dim" msg msg' =>
    +      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dim" msg)
    +        (Data.ProtoLens.Field "dim" msg')
    +dim
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dim")
    +
    +name ::
    +     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    +         (Data.ProtoLens.Field "name" msg')
    +name
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    +
    +size ::
    +     forall msg msg' . Data.ProtoLens.HasField "size" msg msg' =>
    +       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "size" msg)
    +         (Data.ProtoLens.Field "size" msg')
    +size
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "size")
    +
    +unknownRank ::
    +            forall msg msg' . Data.ProtoLens.HasField "unknownRank" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "unknownRank" msg)
    +                (Data.ProtoLens.Field "unknownRank" msg')
    +unknownRank
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "unknownRank")
    +
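(Editorial aside, not part of the generated patch: a sketch of building the
shape [2, 3] with the lenses above.)

    import Data.Default.Class (def)
    import Data.Function ((&))
    import Lens.Family2 ((^.), (.~))
    import Proto.Tensorflow.Core.Framework.TensorShape

    matrix :: TensorShapeProto
    matrix = def & dim .~ [ def & size .~ 2
                          , def & size .~ 3 ]

    rank :: Int
    rank = length (matrix ^. dim)   -- == 2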
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Types.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Types.html
new file mode 100644
index 0000000..cb2dca2
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Types.html
@@ -0,0 +1,355 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/Types.hs
+
+{- This file was auto-generated from tensorflow/core/framework/types.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.Types where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +
    +data DataType = DT_INVALID
    +              | DT_FLOAT
    +              | DT_DOUBLE
    +              | DT_INT32
    +              | DT_UINT8
    +              | DT_INT16
    +              | DT_INT8
    +              | DT_STRING
    +              | DT_COMPLEX64
    +              | DT_INT64
    +              | DT_BOOL
    +              | DT_QINT8
    +              | DT_QUINT8
    +              | DT_QINT32
    +              | DT_BFLOAT16
    +              | DT_QINT16
    +              | DT_QUINT16
    +              | DT_UINT16
    +              | DT_COMPLEX128
    +              | DT_HALF
    +              | DT_RESOURCE
    +              | DT_FLOAT_REF
    +              | DT_DOUBLE_REF
    +              | DT_INT32_REF
    +              | DT_UINT8_REF
    +              | DT_INT16_REF
    +              | DT_INT8_REF
    +              | DT_STRING_REF
    +              | DT_COMPLEX64_REF
    +              | DT_INT64_REF
    +              | DT_BOOL_REF
    +              | DT_QINT8_REF
    +              | DT_QUINT8_REF
    +              | DT_QINT32_REF
    +              | DT_BFLOAT16_REF
    +              | DT_QINT16_REF
    +              | DT_QUINT16_REF
    +              | DT_UINT16_REF
    +              | DT_COMPLEX128_REF
    +              | DT_HALF_REF
    +              | DT_RESOURCE_REF
    +              deriving (Prelude.Show, Prelude.Eq)
    +
    +instance Data.Default.Class.Default DataType where
    +        def = DT_INVALID
    +
    +instance Data.ProtoLens.FieldDefault DataType where
    +        fieldDefault = DT_INVALID
    +
    +instance Data.ProtoLens.MessageEnum DataType where
    +        maybeToEnum 0 = Prelude.Just DT_INVALID
    +        maybeToEnum 1 = Prelude.Just DT_FLOAT
    +        maybeToEnum 2 = Prelude.Just DT_DOUBLE
    +        maybeToEnum 3 = Prelude.Just DT_INT32
    +        maybeToEnum 4 = Prelude.Just DT_UINT8
    +        maybeToEnum 5 = Prelude.Just DT_INT16
    +        maybeToEnum 6 = Prelude.Just DT_INT8
    +        maybeToEnum 7 = Prelude.Just DT_STRING
    +        maybeToEnum 8 = Prelude.Just DT_COMPLEX64
    +        maybeToEnum 9 = Prelude.Just DT_INT64
    +        maybeToEnum 10 = Prelude.Just DT_BOOL
    +        maybeToEnum 11 = Prelude.Just DT_QINT8
    +        maybeToEnum 12 = Prelude.Just DT_QUINT8
    +        maybeToEnum 13 = Prelude.Just DT_QINT32
    +        maybeToEnum 14 = Prelude.Just DT_BFLOAT16
    +        maybeToEnum 15 = Prelude.Just DT_QINT16
    +        maybeToEnum 16 = Prelude.Just DT_QUINT16
    +        maybeToEnum 17 = Prelude.Just DT_UINT16
    +        maybeToEnum 18 = Prelude.Just DT_COMPLEX128
    +        maybeToEnum 19 = Prelude.Just DT_HALF
    +        maybeToEnum 20 = Prelude.Just DT_RESOURCE
    +        maybeToEnum 101 = Prelude.Just DT_FLOAT_REF
    +        maybeToEnum 102 = Prelude.Just DT_DOUBLE_REF
    +        maybeToEnum 103 = Prelude.Just DT_INT32_REF
    +        maybeToEnum 104 = Prelude.Just DT_UINT8_REF
    +        maybeToEnum 105 = Prelude.Just DT_INT16_REF
    +        maybeToEnum 106 = Prelude.Just DT_INT8_REF
    +        maybeToEnum 107 = Prelude.Just DT_STRING_REF
    +        maybeToEnum 108 = Prelude.Just DT_COMPLEX64_REF
    +        maybeToEnum 109 = Prelude.Just DT_INT64_REF
    +        maybeToEnum 110 = Prelude.Just DT_BOOL_REF
    +        maybeToEnum 111 = Prelude.Just DT_QINT8_REF
    +        maybeToEnum 112 = Prelude.Just DT_QUINT8_REF
    +        maybeToEnum 113 = Prelude.Just DT_QINT32_REF
    +        maybeToEnum 114 = Prelude.Just DT_BFLOAT16_REF
    +        maybeToEnum 115 = Prelude.Just DT_QINT16_REF
    +        maybeToEnum 116 = Prelude.Just DT_QUINT16_REF
    +        maybeToEnum 117 = Prelude.Just DT_UINT16_REF
    +        maybeToEnum 118 = Prelude.Just DT_COMPLEX128_REF
    +        maybeToEnum 119 = Prelude.Just DT_HALF_REF
    +        maybeToEnum 120 = Prelude.Just DT_RESOURCE_REF
    +        maybeToEnum _ = Prelude.Nothing
    +        showEnum DT_INVALID = "DT_INVALID"
    +        showEnum DT_FLOAT = "DT_FLOAT"
    +        showEnum DT_DOUBLE = "DT_DOUBLE"
    +        showEnum DT_INT32 = "DT_INT32"
    +        showEnum DT_UINT8 = "DT_UINT8"
    +        showEnum DT_INT16 = "DT_INT16"
    +        showEnum DT_INT8 = "DT_INT8"
    +        showEnum DT_STRING = "DT_STRING"
    +        showEnum DT_COMPLEX64 = "DT_COMPLEX64"
    +        showEnum DT_INT64 = "DT_INT64"
    +        showEnum DT_BOOL = "DT_BOOL"
    +        showEnum DT_QINT8 = "DT_QINT8"
    +        showEnum DT_QUINT8 = "DT_QUINT8"
    +        showEnum DT_QINT32 = "DT_QINT32"
    +        showEnum DT_BFLOAT16 = "DT_BFLOAT16"
    +        showEnum DT_QINT16 = "DT_QINT16"
    +        showEnum DT_QUINT16 = "DT_QUINT16"
    +        showEnum DT_UINT16 = "DT_UINT16"
    +        showEnum DT_COMPLEX128 = "DT_COMPLEX128"
    +        showEnum DT_HALF = "DT_HALF"
    +        showEnum DT_RESOURCE = "DT_RESOURCE"
    +        showEnum DT_FLOAT_REF = "DT_FLOAT_REF"
    +        showEnum DT_DOUBLE_REF = "DT_DOUBLE_REF"
    +        showEnum DT_INT32_REF = "DT_INT32_REF"
    +        showEnum DT_UINT8_REF = "DT_UINT8_REF"
    +        showEnum DT_INT16_REF = "DT_INT16_REF"
    +        showEnum DT_INT8_REF = "DT_INT8_REF"
    +        showEnum DT_STRING_REF = "DT_STRING_REF"
    +        showEnum DT_COMPLEX64_REF = "DT_COMPLEX64_REF"
    +        showEnum DT_INT64_REF = "DT_INT64_REF"
    +        showEnum DT_BOOL_REF = "DT_BOOL_REF"
    +        showEnum DT_QINT8_REF = "DT_QINT8_REF"
    +        showEnum DT_QUINT8_REF = "DT_QUINT8_REF"
    +        showEnum DT_QINT32_REF = "DT_QINT32_REF"
    +        showEnum DT_BFLOAT16_REF = "DT_BFLOAT16_REF"
    +        showEnum DT_QINT16_REF = "DT_QINT16_REF"
    +        showEnum DT_QUINT16_REF = "DT_QUINT16_REF"
    +        showEnum DT_UINT16_REF = "DT_UINT16_REF"
    +        showEnum DT_COMPLEX128_REF = "DT_COMPLEX128_REF"
    +        showEnum DT_HALF_REF = "DT_HALF_REF"
    +        showEnum DT_RESOURCE_REF = "DT_RESOURCE_REF"
    +        readEnum "DT_INVALID" = Prelude.Just DT_INVALID
    +        readEnum "DT_FLOAT" = Prelude.Just DT_FLOAT
    +        readEnum "DT_DOUBLE" = Prelude.Just DT_DOUBLE
    +        readEnum "DT_INT32" = Prelude.Just DT_INT32
    +        readEnum "DT_UINT8" = Prelude.Just DT_UINT8
    +        readEnum "DT_INT16" = Prelude.Just DT_INT16
    +        readEnum "DT_INT8" = Prelude.Just DT_INT8
    +        readEnum "DT_STRING" = Prelude.Just DT_STRING
    +        readEnum "DT_COMPLEX64" = Prelude.Just DT_COMPLEX64
    +        readEnum "DT_INT64" = Prelude.Just DT_INT64
    +        readEnum "DT_BOOL" = Prelude.Just DT_BOOL
    +        readEnum "DT_QINT8" = Prelude.Just DT_QINT8
    +        readEnum "DT_QUINT8" = Prelude.Just DT_QUINT8
    +        readEnum "DT_QINT32" = Prelude.Just DT_QINT32
    +        readEnum "DT_BFLOAT16" = Prelude.Just DT_BFLOAT16
    +        readEnum "DT_QINT16" = Prelude.Just DT_QINT16
    +        readEnum "DT_QUINT16" = Prelude.Just DT_QUINT16
    +        readEnum "DT_UINT16" = Prelude.Just DT_UINT16
    +        readEnum "DT_COMPLEX128" = Prelude.Just DT_COMPLEX128
    +        readEnum "DT_HALF" = Prelude.Just DT_HALF
    +        readEnum "DT_RESOURCE" = Prelude.Just DT_RESOURCE
    +        readEnum "DT_FLOAT_REF" = Prelude.Just DT_FLOAT_REF
    +        readEnum "DT_DOUBLE_REF" = Prelude.Just DT_DOUBLE_REF
    +        readEnum "DT_INT32_REF" = Prelude.Just DT_INT32_REF
    +        readEnum "DT_UINT8_REF" = Prelude.Just DT_UINT8_REF
    +        readEnum "DT_INT16_REF" = Prelude.Just DT_INT16_REF
    +        readEnum "DT_INT8_REF" = Prelude.Just DT_INT8_REF
    +        readEnum "DT_STRING_REF" = Prelude.Just DT_STRING_REF
    +        readEnum "DT_COMPLEX64_REF" = Prelude.Just DT_COMPLEX64_REF
    +        readEnum "DT_INT64_REF" = Prelude.Just DT_INT64_REF
    +        readEnum "DT_BOOL_REF" = Prelude.Just DT_BOOL_REF
    +        readEnum "DT_QINT8_REF" = Prelude.Just DT_QINT8_REF
    +        readEnum "DT_QUINT8_REF" = Prelude.Just DT_QUINT8_REF
    +        readEnum "DT_QINT32_REF" = Prelude.Just DT_QINT32_REF
    +        readEnum "DT_BFLOAT16_REF" = Prelude.Just DT_BFLOAT16_REF
    +        readEnum "DT_QINT16_REF" = Prelude.Just DT_QINT16_REF
    +        readEnum "DT_QUINT16_REF" = Prelude.Just DT_QUINT16_REF
    +        readEnum "DT_UINT16_REF" = Prelude.Just DT_UINT16_REF
    +        readEnum "DT_COMPLEX128_REF" = Prelude.Just DT_COMPLEX128_REF
    +        readEnum "DT_HALF_REF" = Prelude.Just DT_HALF_REF
    +        readEnum "DT_RESOURCE_REF" = Prelude.Just DT_RESOURCE_REF
    +        readEnum _ = Prelude.Nothing
    +
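-- A usage sketch (assumes only the definitions above and this module's
-- qualified imports): maybeToEnum, showEnum, and readEnum form a total
-- decode layer, so unknown wire values come back as Nothing instead of
-- raising an error.
--
-- >>> Data.ProtoLens.maybeToEnum 101 :: Prelude.Maybe DataType
-- Just DT_FLOAT_REF
-- >>> readEnum (showEnum DT_HALF) Prelude.== Prelude.Just DT_HALF
-- True
-- >>> Data.ProtoLens.maybeToEnum 42 :: Prelude.Maybe DataType
-- Nothing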
    +instance Prelude.Enum DataType where
    +        toEnum k__
    +          = Prelude.maybe
    +              (Prelude.error
    +                 ((Prelude.++) "toEnum: unknown value for enum DataType: "
    +                    (Prelude.show k__)))
    +              Prelude.id
    +              (Data.ProtoLens.maybeToEnum k__)
    +        fromEnum DT_INVALID = 0
    +        fromEnum DT_FLOAT = 1
    +        fromEnum DT_DOUBLE = 2
    +        fromEnum DT_INT32 = 3
    +        fromEnum DT_UINT8 = 4
    +        fromEnum DT_INT16 = 5
    +        fromEnum DT_INT8 = 6
    +        fromEnum DT_STRING = 7
    +        fromEnum DT_COMPLEX64 = 8
    +        fromEnum DT_INT64 = 9
    +        fromEnum DT_BOOL = 10
    +        fromEnum DT_QINT8 = 11
    +        fromEnum DT_QUINT8 = 12
    +        fromEnum DT_QINT32 = 13
    +        fromEnum DT_BFLOAT16 = 14
    +        fromEnum DT_QINT16 = 15
    +        fromEnum DT_QUINT16 = 16
    +        fromEnum DT_UINT16 = 17
    +        fromEnum DT_COMPLEX128 = 18
    +        fromEnum DT_HALF = 19
    +        fromEnum DT_RESOURCE = 20
    +        fromEnum DT_FLOAT_REF = 101
    +        fromEnum DT_DOUBLE_REF = 102
    +        fromEnum DT_INT32_REF = 103
    +        fromEnum DT_UINT8_REF = 104
    +        fromEnum DT_INT16_REF = 105
    +        fromEnum DT_INT8_REF = 106
    +        fromEnum DT_STRING_REF = 107
    +        fromEnum DT_COMPLEX64_REF = 108
    +        fromEnum DT_INT64_REF = 109
    +        fromEnum DT_BOOL_REF = 110
    +        fromEnum DT_QINT8_REF = 111
    +        fromEnum DT_QUINT8_REF = 112
    +        fromEnum DT_QINT32_REF = 113
    +        fromEnum DT_BFLOAT16_REF = 114
    +        fromEnum DT_QINT16_REF = 115
    +        fromEnum DT_QUINT16_REF = 116
    +        fromEnum DT_UINT16_REF = 117
    +        fromEnum DT_COMPLEX128_REF = 118
    +        fromEnum DT_HALF_REF = 119
    +        fromEnum DT_RESOURCE_REF = 120
    +        succ DT_RESOURCE_REF
    +          = Prelude.error
    +              "Ident \"DataType\".Ident \"succ\": bad argument Ident \"DT_RESOURCE_REF\". This value would be out of bounds."
    +        succ DT_INVALID = DT_FLOAT
    +        succ DT_FLOAT = DT_DOUBLE
    +        succ DT_DOUBLE = DT_INT32
    +        succ DT_INT32 = DT_UINT8
    +        succ DT_UINT8 = DT_INT16
    +        succ DT_INT16 = DT_INT8
    +        succ DT_INT8 = DT_STRING
    +        succ DT_STRING = DT_COMPLEX64
    +        succ DT_COMPLEX64 = DT_INT64
    +        succ DT_INT64 = DT_BOOL
    +        succ DT_BOOL = DT_QINT8
    +        succ DT_QINT8 = DT_QUINT8
    +        succ DT_QUINT8 = DT_QINT32
    +        succ DT_QINT32 = DT_BFLOAT16
    +        succ DT_BFLOAT16 = DT_QINT16
    +        succ DT_QINT16 = DT_QUINT16
    +        succ DT_QUINT16 = DT_UINT16
    +        succ DT_UINT16 = DT_COMPLEX128
    +        succ DT_COMPLEX128 = DT_HALF
    +        succ DT_HALF = DT_RESOURCE
    +        succ DT_RESOURCE = DT_FLOAT_REF
    +        succ DT_FLOAT_REF = DT_DOUBLE_REF
    +        succ DT_DOUBLE_REF = DT_INT32_REF
    +        succ DT_INT32_REF = DT_UINT8_REF
    +        succ DT_UINT8_REF = DT_INT16_REF
    +        succ DT_INT16_REF = DT_INT8_REF
    +        succ DT_INT8_REF = DT_STRING_REF
    +        succ DT_STRING_REF = DT_COMPLEX64_REF
    +        succ DT_COMPLEX64_REF = DT_INT64_REF
    +        succ DT_INT64_REF = DT_BOOL_REF
    +        succ DT_BOOL_REF = DT_QINT8_REF
    +        succ DT_QINT8_REF = DT_QUINT8_REF
    +        succ DT_QUINT8_REF = DT_QINT32_REF
    +        succ DT_QINT32_REF = DT_BFLOAT16_REF
    +        succ DT_BFLOAT16_REF = DT_QINT16_REF
    +        succ DT_QINT16_REF = DT_QUINT16_REF
    +        succ DT_QUINT16_REF = DT_UINT16_REF
    +        succ DT_UINT16_REF = DT_COMPLEX128_REF
    +        succ DT_COMPLEX128_REF = DT_HALF_REF
    +        succ DT_HALF_REF = DT_RESOURCE_REF
    +        pred DT_INVALID
    +          = Prelude.error
    +              "Ident \"DataType\".Ident \"pred\": bad argument Ident \"DT_INVALID\". This value would be out of bounds."
    +        pred DT_FLOAT = DT_INVALID
    +        pred DT_DOUBLE = DT_FLOAT
    +        pred DT_INT32 = DT_DOUBLE
    +        pred DT_UINT8 = DT_INT32
    +        pred DT_INT16 = DT_UINT8
    +        pred DT_INT8 = DT_INT16
    +        pred DT_STRING = DT_INT8
    +        pred DT_COMPLEX64 = DT_STRING
    +        pred DT_INT64 = DT_COMPLEX64
    +        pred DT_BOOL = DT_INT64
    +        pred DT_QINT8 = DT_BOOL
    +        pred DT_QUINT8 = DT_QINT8
    +        pred DT_QINT32 = DT_QUINT8
    +        pred DT_BFLOAT16 = DT_QINT32
    +        pred DT_QINT16 = DT_BFLOAT16
    +        pred DT_QUINT16 = DT_QINT16
    +        pred DT_UINT16 = DT_QUINT16
    +        pred DT_COMPLEX128 = DT_UINT16
    +        pred DT_HALF = DT_COMPLEX128
    +        pred DT_RESOURCE = DT_HALF
    +        pred DT_FLOAT_REF = DT_RESOURCE
    +        pred DT_DOUBLE_REF = DT_FLOAT_REF
    +        pred DT_INT32_REF = DT_DOUBLE_REF
    +        pred DT_UINT8_REF = DT_INT32_REF
    +        pred DT_INT16_REF = DT_UINT8_REF
    +        pred DT_INT8_REF = DT_INT16_REF
    +        pred DT_STRING_REF = DT_INT8_REF
    +        pred DT_COMPLEX64_REF = DT_STRING_REF
    +        pred DT_INT64_REF = DT_COMPLEX64_REF
    +        pred DT_BOOL_REF = DT_INT64_REF
    +        pred DT_QINT8_REF = DT_BOOL_REF
    +        pred DT_QUINT8_REF = DT_QINT8_REF
    +        pred DT_QINT32_REF = DT_QUINT8_REF
    +        pred DT_BFLOAT16_REF = DT_QINT32_REF
    +        pred DT_QINT16_REF = DT_BFLOAT16_REF
    +        pred DT_QUINT16_REF = DT_QINT16_REF
    +        pred DT_UINT16_REF = DT_QUINT16_REF
    +        pred DT_COMPLEX128_REF = DT_UINT16_REF
    +        pred DT_HALF_REF = DT_COMPLEX128_REF
    +        pred DT_RESOURCE_REF = DT_HALF_REF
    +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
    +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
    +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
    +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
    +
    +instance Prelude.Bounded DataType where
    +        minBound = DT_INVALID
    +        maxBound = DT_RESOURCE_REF
    +
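-- A sketch of how these instances interact: succ and pred skip the
-- numeric gap between the plain types (0-20) and the _REF variants
-- (101-120), so enumeration stays total over the declared values.
--
-- >>> Prelude.fromEnum (Prelude.succ DT_RESOURCE)
-- 101
-- >>> Prelude.length ([Prelude.minBound .. Prelude.maxBound] :: [DataType])
-- 41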
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Versions.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Versions.html
new file mode 100644
index 0000000..aa80fb3
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Versions.html
@@ -0,0 +1,128 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/Versions.hs
+{- This file was auto-generated from tensorflow/core/framework/versions.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Framework.Versions where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +
    +data VersionDef = VersionDef{_VersionDef'producer ::
    +                             Data.Int.Int32,
    +                             _VersionDef'minConsumer :: Data.Int.Int32,
    +                             _VersionDef'badConsumers :: [Data.Int.Int32]}
    +                deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "producer" VersionDef =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "producer" VersionDef VersionDef
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _VersionDef'producer
    +              (\ x__ y__ -> x__{_VersionDef'producer = y__})
    +
    +type instance Data.ProtoLens.Field "minConsumer" VersionDef =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "minConsumer" VersionDef
    +         VersionDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _VersionDef'minConsumer
    +              (\ x__ y__ -> x__{_VersionDef'minConsumer = y__})
    +
    +type instance Data.ProtoLens.Field "badConsumers" VersionDef =
    +     [Data.Int.Int32]
    +
    +instance Data.ProtoLens.HasField "badConsumers" VersionDef
    +         VersionDef where
    +        field _
    +          = Lens.Family2.Unchecked.lens _VersionDef'badConsumers
    +              (\ x__ y__ -> x__{_VersionDef'badConsumers = y__})
    +
    +instance Data.Default.Class.Default VersionDef where
    +        def
    +          = VersionDef{_VersionDef'producer = Data.ProtoLens.fieldDefault,
    +                       _VersionDef'minConsumer = Data.ProtoLens.fieldDefault,
    +                       _VersionDef'badConsumers = []}
    +
    +instance Data.ProtoLens.Message VersionDef where
    +        descriptor
    +          = let producer__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "producer"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional producer)
    +                minConsumer__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "min_consumer"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional minConsumer)
    +                badConsumers__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "bad_consumers"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked badConsumers)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, producer__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, minConsumer__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, badConsumers__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("producer", producer__field_descriptor),
    +                    ("min_consumer", minConsumer__field_descriptor),
    +                    ("bad_consumers", badConsumers__field_descriptor)])
    +
    +badConsumers ::
    +             forall msg msg' .
    +               Data.ProtoLens.HasField "badConsumers" msg msg' =>
    +               Lens.Family2.Lens msg msg'
    +                 (Data.ProtoLens.Field "badConsumers" msg)
    +                 (Data.ProtoLens.Field "badConsumers" msg')
    +badConsumers
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "badConsumers")
    +
    +minConsumer ::
    +            forall msg msg' . Data.ProtoLens.HasField "minConsumer" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "minConsumer" msg)
    +                (Data.ProtoLens.Field "minConsumer" msg')
    +minConsumer
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "minConsumer")
    +
    +producer ::
    +         forall msg msg' . Data.ProtoLens.HasField "producer" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "producer" msg)
    +             (Data.ProtoLens.Field "producer" msg')
    +producer
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "producer")
    +
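-- A minimal construction sketch using the lenses above (assumes this
-- module's qualified imports; set and view come from lens-family's
-- Lens.Family2):
--
--   exampleVersions :: VersionDef
--   exampleVersions
--     = Lens.Family2.set producer 21
--         (Lens.Family2.set badConsumers [9, 13] Data.Default.Class.def)
--
-- Untouched fields keep their zero defaults, so
-- Lens.Family2.view minConsumer exampleVersions evaluates to 0.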
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Protobuf-Config.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Protobuf-Config.html
new file mode 100644
index 0000000..e91a36d
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Protobuf-Config.html
@@ -0,0 +1,1671 @@
+.stack-work/dist/x86_64-linux-dkd1ce2ff9c9560b648268df668d177711/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Protobuf/Config.hs
+{- This file was auto-generated from tensorflow/core/protobuf/config.proto by the proto-lens-protoc program. -}
    +{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    +  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    +  PatternSynonyms #-}
    +{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    +module Proto.Tensorflow.Core.Protobuf.Config where
    +import qualified Prelude
    +import qualified Data.Int
    +import qualified Data.Word
    +import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    +       as Data.ProtoLens
    +import qualified
    +       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    +       as Data.ProtoLens.Message.Enum
    +import qualified Data.ProtoLens.Reexport.Lens.Family2
    +       as Lens.Family2
    +import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    +       as Lens.Family2.Unchecked
    +import qualified Data.ProtoLens.Reexport.Data.Default.Class
    +       as Data.Default.Class
    +import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    +import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    +import qualified Data.ProtoLens.Reexport.Data.ByteString
    +       as Data.ByteString
    +import qualified Proto.Tensorflow.Core.Framework.CostGraph
    +import qualified Proto.Tensorflow.Core.Framework.Graph
    +import qualified Proto.Tensorflow.Core.Framework.StepStats
    +
    +data ConfigProto = ConfigProto{_ConfigProto'deviceCount ::
    +                               Data.Map.Map Data.Text.Text Data.Int.Int32,
    +                               _ConfigProto'intraOpParallelismThreads :: Data.Int.Int32,
    +                               _ConfigProto'interOpParallelismThreads :: Data.Int.Int32,
    +                               _ConfigProto'usePerSessionThreads :: Prelude.Bool,
    +                               _ConfigProto'sessionInterOpThreadPool :: [ThreadPoolOptionProto],
    +                               _ConfigProto'placementPeriod :: Data.Int.Int32,
    +                               _ConfigProto'deviceFilters :: [Data.Text.Text],
    +                               _ConfigProto'gpuOptions :: Prelude.Maybe GPUOptions,
    +                               _ConfigProto'allowSoftPlacement :: Prelude.Bool,
    +                               _ConfigProto'logDevicePlacement :: Prelude.Bool,
    +                               _ConfigProto'graphOptions :: Prelude.Maybe GraphOptions,
    +                               _ConfigProto'operationTimeoutInMs :: Data.Int.Int64}
    +                 deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "deviceCount" ConfigProto =
    +     Data.Map.Map Data.Text.Text Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "deviceCount" ConfigProto
    +         ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'deviceCount
    +              (\ x__ y__ -> x__{_ConfigProto'deviceCount = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "intraOpParallelismThreads" ConfigProto =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "intraOpParallelismThreads"
    +         ConfigProto ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens
    +              _ConfigProto'intraOpParallelismThreads
    +              (\ x__ y__ -> x__{_ConfigProto'intraOpParallelismThreads = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "interOpParallelismThreads" ConfigProto =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "interOpParallelismThreads"
    +         ConfigProto ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens
    +              _ConfigProto'interOpParallelismThreads
    +              (\ x__ y__ -> x__{_ConfigProto'interOpParallelismThreads = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "usePerSessionThreads" ConfigProto =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "usePerSessionThreads" ConfigProto
    +         ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'usePerSessionThreads
    +              (\ x__ y__ -> x__{_ConfigProto'usePerSessionThreads = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "sessionInterOpThreadPool" ConfigProto =
    +     [ThreadPoolOptionProto]
    +
    +instance Data.ProtoLens.HasField "sessionInterOpThreadPool"
    +         ConfigProto ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'sessionInterOpThreadPool
    +              (\ x__ y__ -> x__{_ConfigProto'sessionInterOpThreadPool = y__})
    +
    +type instance Data.ProtoLens.Field "placementPeriod" ConfigProto =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "placementPeriod" ConfigProto
    +         ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'placementPeriod
    +              (\ x__ y__ -> x__{_ConfigProto'placementPeriod = y__})
    +
    +type instance Data.ProtoLens.Field "deviceFilters" ConfigProto =
    +     [Data.Text.Text]
    +
    +instance Data.ProtoLens.HasField "deviceFilters" ConfigProto
    +         ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'deviceFilters
    +              (\ x__ y__ -> x__{_ConfigProto'deviceFilters = y__})
    +
    +type instance Data.ProtoLens.Field "gpuOptions" ConfigProto =
    +     GPUOptions
    +
    +instance Data.ProtoLens.HasField "gpuOptions" ConfigProto
    +         ConfigProto where
    +        field _
    +          = (Prelude..) maybe'gpuOptions
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'gpuOptions" ConfigProto =
    +     Prelude.Maybe GPUOptions
    +
    +instance Data.ProtoLens.HasField "maybe'gpuOptions" ConfigProto
    +         ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'gpuOptions
    +              (\ x__ y__ -> x__{_ConfigProto'gpuOptions = y__})
    +
    +type instance Data.ProtoLens.Field "allowSoftPlacement" ConfigProto
    +     = Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "allowSoftPlacement" ConfigProto
    +         ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'allowSoftPlacement
    +              (\ x__ y__ -> x__{_ConfigProto'allowSoftPlacement = y__})
    +
    +type instance Data.ProtoLens.Field "logDevicePlacement" ConfigProto
    +     = Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "logDevicePlacement" ConfigProto
    +         ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'logDevicePlacement
    +              (\ x__ y__ -> x__{_ConfigProto'logDevicePlacement = y__})
    +
    +type instance Data.ProtoLens.Field "graphOptions" ConfigProto =
    +     GraphOptions
    +
    +instance Data.ProtoLens.HasField "graphOptions" ConfigProto
    +         ConfigProto where
    +        field _
    +          = (Prelude..) maybe'graphOptions
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'graphOptions" ConfigProto
    +     = Prelude.Maybe GraphOptions
    +
    +instance Data.ProtoLens.HasField "maybe'graphOptions" ConfigProto
    +         ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'graphOptions
    +              (\ x__ y__ -> x__{_ConfigProto'graphOptions = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "operationTimeoutInMs" ConfigProto =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "operationTimeoutInMs" ConfigProto
    +         ConfigProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'operationTimeoutInMs
    +              (\ x__ y__ -> x__{_ConfigProto'operationTimeoutInMs = y__})
    +
    +instance Data.Default.Class.Default ConfigProto where
    +        def
    +          = ConfigProto{_ConfigProto'deviceCount = Data.Map.empty,
    +                        _ConfigProto'intraOpParallelismThreads =
    +                          Data.ProtoLens.fieldDefault,
    +                        _ConfigProto'interOpParallelismThreads =
    +                          Data.ProtoLens.fieldDefault,
    +                        _ConfigProto'usePerSessionThreads = Data.ProtoLens.fieldDefault,
    +                        _ConfigProto'sessionInterOpThreadPool = [],
    +                        _ConfigProto'placementPeriod = Data.ProtoLens.fieldDefault,
    +                        _ConfigProto'deviceFilters = [],
    +                        _ConfigProto'gpuOptions = Prelude.Nothing,
    +                        _ConfigProto'allowSoftPlacement = Data.ProtoLens.fieldDefault,
    +                        _ConfigProto'logDevicePlacement = Data.ProtoLens.fieldDefault,
    +                        _ConfigProto'graphOptions = Prelude.Nothing,
    +                        _ConfigProto'operationTimeoutInMs = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message ConfigProto where
    +        descriptor
    +          = let deviceCount__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "device_count"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor ConfigProto'DeviceCountEntry)
    +                      (Data.ProtoLens.MapField key value deviceCount)
    +                intraOpParallelismThreads__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "intra_op_parallelism_threads"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         intraOpParallelismThreads)
    +                interOpParallelismThreads__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "inter_op_parallelism_threads"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         interOpParallelismThreads)
    +                usePerSessionThreads__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "use_per_session_threads"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         usePerSessionThreads)
    +                sessionInterOpThreadPool__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "session_inter_op_thread_pool"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor ThreadPoolOptionProto)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    +                         sessionInterOpThreadPool)
    +                placementPeriod__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "placement_period"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional placementPeriod)
    +                deviceFilters__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "device_filters"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    +                         deviceFilters)
    +                gpuOptions__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "gpu_options"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor GPUOptions)
    +                      (Data.ProtoLens.OptionalField maybe'gpuOptions)
    +                allowSoftPlacement__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "allow_soft_placement"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         allowSoftPlacement)
    +                logDevicePlacement__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "log_device_placement"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         logDevicePlacement)
    +                graphOptions__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "graph_options"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor GraphOptions)
    +                      (Data.ProtoLens.OptionalField maybe'graphOptions)
    +                operationTimeoutInMs__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "operation_timeout_in_ms"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         operationTimeoutInMs)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, deviceCount__field_descriptor),
    +                    (Data.ProtoLens.Tag 2,
    +                     intraOpParallelismThreads__field_descriptor),
    +                    (Data.ProtoLens.Tag 5,
    +                     interOpParallelismThreads__field_descriptor),
    +                    (Data.ProtoLens.Tag 9, usePerSessionThreads__field_descriptor),
    +                    (Data.ProtoLens.Tag 12,
    +                     sessionInterOpThreadPool__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, placementPeriod__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, deviceFilters__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, gpuOptions__field_descriptor),
    +                    (Data.ProtoLens.Tag 7, allowSoftPlacement__field_descriptor),
    +                    (Data.ProtoLens.Tag 8, logDevicePlacement__field_descriptor),
    +                    (Data.ProtoLens.Tag 10, graphOptions__field_descriptor),
    +                    (Data.ProtoLens.Tag 11, operationTimeoutInMs__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("device_count", deviceCount__field_descriptor),
    +                    ("intra_op_parallelism_threads",
    +                     intraOpParallelismThreads__field_descriptor),
    +                    ("inter_op_parallelism_threads",
    +                     interOpParallelismThreads__field_descriptor),
    +                    ("use_per_session_threads",
    +                     usePerSessionThreads__field_descriptor),
    +                    ("session_inter_op_thread_pool",
    +                     sessionInterOpThreadPool__field_descriptor),
    +                    ("placement_period", placementPeriod__field_descriptor),
    +                    ("device_filters", deviceFilters__field_descriptor),
    +                    ("gpu_options", gpuOptions__field_descriptor),
    +                    ("allow_soft_placement", allowSoftPlacement__field_descriptor),
    +                    ("log_device_placement", logDevicePlacement__field_descriptor),
    +                    ("graph_options", graphOptions__field_descriptor),
    +                    ("operation_timeout_in_ms",
    +                     operationTimeoutInMs__field_descriptor)])
    +
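-- A sketch of the two lens flavours generated per message field:
-- gpuOptions substitutes def for a missing submessage (via maybeLens),
-- while maybe'gpuOptions exposes the raw Prelude.Maybe GPUOptions.
-- allowGrowth is the GPUOptions accessor defined further down this file.
--
--   tuned :: ConfigProto
--   tuned = Lens.Family2.set (gpuOptions Prelude.. allowGrowth)
--             Prelude.True Data.Default.Class.def
--
-- Lens.Family2.view maybe'gpuOptions tuned is a Just, whereas on
-- Data.Default.Class.def it is Nothing.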
    +data ConfigProto'DeviceCountEntry = ConfigProto'DeviceCountEntry{_ConfigProto'DeviceCountEntry'key
    +                                                                 :: Data.Text.Text,
    +                                                                 _ConfigProto'DeviceCountEntry'value
    +                                                                 :: Data.Int.Int32}
    +                                  deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance
    +     Data.ProtoLens.Field "key" ConfigProto'DeviceCountEntry =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "key" ConfigProto'DeviceCountEntry
    +         ConfigProto'DeviceCountEntry where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'DeviceCountEntry'key
    +              (\ x__ y__ -> x__{_ConfigProto'DeviceCountEntry'key = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "value" ConfigProto'DeviceCountEntry =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "value"
    +         ConfigProto'DeviceCountEntry ConfigProto'DeviceCountEntry where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ConfigProto'DeviceCountEntry'value
    +              (\ x__ y__ -> x__{_ConfigProto'DeviceCountEntry'value = y__})
    +
    +instance Data.Default.Class.Default ConfigProto'DeviceCountEntry
    +         where
    +        def
    +          = ConfigProto'DeviceCountEntry{_ConfigProto'DeviceCountEntry'key =
    +                                           Data.ProtoLens.fieldDefault,
    +                                         _ConfigProto'DeviceCountEntry'value =
    +                                           Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message ConfigProto'DeviceCountEntry where
    +        descriptor
    +          = let key__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "key"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    +                value__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "value"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional value)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("key", key__field_descriptor),
    +                    ("value", value__field_descriptor)])
    +
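-- ConfigProto'DeviceCountEntry is only the wire-level pair type; the
-- deviceCount lens on ConfigProto above traffics in a real Data.Map.
-- A sketch, assuming this module's qualified imports:
--
--   capped :: ConfigProto
--   capped = Lens.Family2.set deviceCount
--              (Data.Map.fromList
--                 [(Data.Text.pack "CPU", 4), (Data.Text.pack "GPU", 1)])
--              Data.Default.Class.def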
    +data DebugTensorWatch = DebugTensorWatch{_DebugTensorWatch'nodeName
    +                                         :: Data.Text.Text,
    +                                         _DebugTensorWatch'outputSlot :: Data.Int.Int32,
    +                                         _DebugTensorWatch'debugOps :: [Data.Text.Text],
    +                                         _DebugTensorWatch'debugUrls :: [Data.Text.Text]}
    +                      deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "nodeName" DebugTensorWatch =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "nodeName" DebugTensorWatch
    +         DebugTensorWatch where
    +        field _
    +          = Lens.Family2.Unchecked.lens _DebugTensorWatch'nodeName
    +              (\ x__ y__ -> x__{_DebugTensorWatch'nodeName = y__})
    +
    +type instance Data.ProtoLens.Field "outputSlot" DebugTensorWatch =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "outputSlot" DebugTensorWatch
    +         DebugTensorWatch where
    +        field _
    +          = Lens.Family2.Unchecked.lens _DebugTensorWatch'outputSlot
    +              (\ x__ y__ -> x__{_DebugTensorWatch'outputSlot = y__})
    +
    +type instance Data.ProtoLens.Field "debugOps" DebugTensorWatch =
    +     [Data.Text.Text]
    +
    +instance Data.ProtoLens.HasField "debugOps" DebugTensorWatch
    +         DebugTensorWatch where
    +        field _
    +          = Lens.Family2.Unchecked.lens _DebugTensorWatch'debugOps
    +              (\ x__ y__ -> x__{_DebugTensorWatch'debugOps = y__})
    +
    +type instance Data.ProtoLens.Field "debugUrls" DebugTensorWatch =
    +     [Data.Text.Text]
    +
    +instance Data.ProtoLens.HasField "debugUrls" DebugTensorWatch
    +         DebugTensorWatch where
    +        field _
    +          = Lens.Family2.Unchecked.lens _DebugTensorWatch'debugUrls
    +              (\ x__ y__ -> x__{_DebugTensorWatch'debugUrls = y__})
    +
    +instance Data.Default.Class.Default DebugTensorWatch where
    +        def
    +          = DebugTensorWatch{_DebugTensorWatch'nodeName =
    +                               Data.ProtoLens.fieldDefault,
    +                             _DebugTensorWatch'outputSlot = Data.ProtoLens.fieldDefault,
    +                             _DebugTensorWatch'debugOps = [], _DebugTensorWatch'debugUrls = []}
    +
    +instance Data.ProtoLens.Message DebugTensorWatch where
    +        descriptor
    +          = let nodeName__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "node_name"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional nodeName)
    +                outputSlot__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "output_slot"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional outputSlot)
    +                debugOps__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "debug_ops"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked debugOps)
    +                debugUrls__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "debug_urls"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked debugUrls)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, nodeName__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, outputSlot__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, debugOps__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, debugUrls__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("node_name", nodeName__field_descriptor),
    +                    ("output_slot", outputSlot__field_descriptor),
    +                    ("debug_ops", debugOps__field_descriptor),
    +                    ("debug_urls", debugUrls__field_descriptor)])
    +
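-- Repeated string fields are plain lists of Data.Text.Text, so they
-- can be extended with Lens.Family2.over; a sketch:
--
--   watch :: DebugTensorWatch
--   watch = Lens.Family2.over debugUrls
--             (Prelude.++ [Data.Text.pack "file:///tmp/tfdbg"])
--             (Lens.Family2.set nodeName (Data.Text.pack "logits")
--                Data.Default.Class.def)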
    +data GPUOptions = GPUOptions{_GPUOptions'perProcessGpuMemoryFraction
    +                             :: Prelude.Double,
    +                             _GPUOptions'allocatorType :: Data.Text.Text,
    +                             _GPUOptions'deferredDeletionBytes :: Data.Int.Int64,
    +                             _GPUOptions'allowGrowth :: Prelude.Bool,
    +                             _GPUOptions'visibleDeviceList :: Data.Text.Text}
    +                deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance
    +     Data.ProtoLens.Field "perProcessGpuMemoryFraction" GPUOptions =
    +     Prelude.Double
    +
    +instance Data.ProtoLens.HasField "perProcessGpuMemoryFraction"
    +         GPUOptions GPUOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens
    +              _GPUOptions'perProcessGpuMemoryFraction
    +              (\ x__ y__ -> x__{_GPUOptions'perProcessGpuMemoryFraction = y__})
    +
    +type instance Data.ProtoLens.Field "allocatorType" GPUOptions =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "allocatorType" GPUOptions
    +         GPUOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GPUOptions'allocatorType
    +              (\ x__ y__ -> x__{_GPUOptions'allocatorType = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "deferredDeletionBytes" GPUOptions =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "deferredDeletionBytes" GPUOptions
    +         GPUOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GPUOptions'deferredDeletionBytes
    +              (\ x__ y__ -> x__{_GPUOptions'deferredDeletionBytes = y__})
    +
    +type instance Data.ProtoLens.Field "allowGrowth" GPUOptions =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "allowGrowth" GPUOptions
    +         GPUOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GPUOptions'allowGrowth
    +              (\ x__ y__ -> x__{_GPUOptions'allowGrowth = y__})
    +
    +type instance Data.ProtoLens.Field "visibleDeviceList" GPUOptions =
    +     Data.Text.Text
    +
    +instance Data.ProtoLens.HasField "visibleDeviceList" GPUOptions
    +         GPUOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GPUOptions'visibleDeviceList
    +              (\ x__ y__ -> x__{_GPUOptions'visibleDeviceList = y__})
    +
    +instance Data.Default.Class.Default GPUOptions where
    +        def
    +          = GPUOptions{_GPUOptions'perProcessGpuMemoryFraction =
    +                         Data.ProtoLens.fieldDefault,
    +                       _GPUOptions'allocatorType = Data.ProtoLens.fieldDefault,
    +                       _GPUOptions'deferredDeletionBytes = Data.ProtoLens.fieldDefault,
    +                       _GPUOptions'allowGrowth = Data.ProtoLens.fieldDefault,
    +                       _GPUOptions'visibleDeviceList = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message GPUOptions where
    +        descriptor
    +          = let perProcessGpuMemoryFraction__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "per_process_gpu_memory_fraction"
    +                      (Data.ProtoLens.DoubleField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         perProcessGpuMemoryFraction)
    +                allocatorType__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "allocator_type"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorType)
    +                deferredDeletionBytes__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "deferred_deletion_bytes"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         deferredDeletionBytes)
    +                allowGrowth__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "allow_growth"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allowGrowth)
    +                visibleDeviceList__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "visible_device_list"
    +                      (Data.ProtoLens.StringField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         visibleDeviceList)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1,
    +                     perProcessGpuMemoryFraction__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, allocatorType__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, deferredDeletionBytes__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, allowGrowth__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, visibleDeviceList__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("per_process_gpu_memory_fraction",
    +                     perProcessGpuMemoryFraction__field_descriptor),
    +                    ("allocator_type", allocatorType__field_descriptor),
    +                    ("deferred_deletion_bytes",
    +                     deferredDeletionBytes__field_descriptor),
    +                    ("allow_growth", allowGrowth__field_descriptor),
    +                    ("visible_device_list", visibleDeviceList__field_descriptor)])
    +
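-- Data.ProtoLens.fieldDefault in the Default instance above follows
-- the proto zero-value convention: 0 for numeric fields, False for
-- Bool, and empty Text for strings. As a sketch:
--
--   Lens.Family2.view allowGrowth (Data.Default.Class.def :: GPUOptions)
--     evaluates to False, and
--   Lens.Family2.view perProcessGpuMemoryFraction
--     (Data.Default.Class.def :: GPUOptions) evaluates to 0.0.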
    +data GraphOptions = GraphOptions{_GraphOptions'enableRecvScheduling
    +                                 :: Prelude.Bool,
    +                                 _GraphOptions'optimizerOptions :: Prelude.Maybe OptimizerOptions,
    +                                 _GraphOptions'buildCostModel :: Data.Int.Int64,
    +                                 _GraphOptions'buildCostModelAfter :: Data.Int.Int64,
    +                                 _GraphOptions'inferShapes :: Prelude.Bool,
    +                                 _GraphOptions'placePrunedGraph :: Prelude.Bool,
    +                                 _GraphOptions'enableBfloat16Sendrecv :: Prelude.Bool,
    +                                 _GraphOptions'timelineStep :: Data.Int.Int32}
    +                  deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance
    +     Data.ProtoLens.Field "enableRecvScheduling" GraphOptions =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "enableRecvScheduling"
    +         GraphOptions GraphOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphOptions'enableRecvScheduling
    +              (\ x__ y__ -> x__{_GraphOptions'enableRecvScheduling = y__})
    +
    +type instance Data.ProtoLens.Field "optimizerOptions" GraphOptions
    +     = OptimizerOptions
    +
    +instance Data.ProtoLens.HasField "optimizerOptions" GraphOptions
    +         GraphOptions where
    +        field _
    +          = (Prelude..) maybe'optimizerOptions
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance
    +     Data.ProtoLens.Field "maybe'optimizerOptions" GraphOptions =
    +     Prelude.Maybe OptimizerOptions
    +
    +instance Data.ProtoLens.HasField "maybe'optimizerOptions"
    +         GraphOptions GraphOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphOptions'optimizerOptions
    +              (\ x__ y__ -> x__{_GraphOptions'optimizerOptions = y__})
    +
    +type instance Data.ProtoLens.Field "buildCostModel" GraphOptions =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "buildCostModel" GraphOptions
    +         GraphOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphOptions'buildCostModel
    +              (\ x__ y__ -> x__{_GraphOptions'buildCostModel = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "buildCostModelAfter" GraphOptions =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "buildCostModelAfter" GraphOptions
    +         GraphOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphOptions'buildCostModelAfter
    +              (\ x__ y__ -> x__{_GraphOptions'buildCostModelAfter = y__})
    +
    +type instance Data.ProtoLens.Field "inferShapes" GraphOptions =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "inferShapes" GraphOptions
    +         GraphOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphOptions'inferShapes
    +              (\ x__ y__ -> x__{_GraphOptions'inferShapes = y__})
    +
    +type instance Data.ProtoLens.Field "placePrunedGraph" GraphOptions
    +     = Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "placePrunedGraph" GraphOptions
    +         GraphOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphOptions'placePrunedGraph
    +              (\ x__ y__ -> x__{_GraphOptions'placePrunedGraph = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "enableBfloat16Sendrecv" GraphOptions =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "enableBfloat16Sendrecv"
    +         GraphOptions GraphOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphOptions'enableBfloat16Sendrecv
    +              (\ x__ y__ -> x__{_GraphOptions'enableBfloat16Sendrecv = y__})
    +
    +type instance Data.ProtoLens.Field "timelineStep" GraphOptions =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "timelineStep" GraphOptions
    +         GraphOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _GraphOptions'timelineStep
    +              (\ x__ y__ -> x__{_GraphOptions'timelineStep = y__})
    +
    +instance Data.Default.Class.Default GraphOptions where
    +        def
    +          = GraphOptions{_GraphOptions'enableRecvScheduling =
    +                           Data.ProtoLens.fieldDefault,
    +                         _GraphOptions'optimizerOptions = Prelude.Nothing,
    +                         _GraphOptions'buildCostModel = Data.ProtoLens.fieldDefault,
    +                         _GraphOptions'buildCostModelAfter = Data.ProtoLens.fieldDefault,
    +                         _GraphOptions'inferShapes = Data.ProtoLens.fieldDefault,
    +                         _GraphOptions'placePrunedGraph = Data.ProtoLens.fieldDefault,
    +                         _GraphOptions'enableBfloat16Sendrecv = Data.ProtoLens.fieldDefault,
    +                         _GraphOptions'timelineStep = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message GraphOptions where
    +        descriptor
    +          = let enableRecvScheduling__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "enable_recv_scheduling"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         enableRecvScheduling)
    +                optimizerOptions__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "optimizer_options"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor OptimizerOptions)
    +                      (Data.ProtoLens.OptionalField maybe'optimizerOptions)
    +                buildCostModel__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "build_cost_model"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional buildCostModel)
    +                buildCostModelAfter__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "build_cost_model_after"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         buildCostModelAfter)
    +                inferShapes__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "infer_shapes"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional inferShapes)
    +                placePrunedGraph__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "place_pruned_graph"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         placePrunedGraph)
    +                enableBfloat16Sendrecv__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "enable_bfloat16_sendrecv"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         enableBfloat16Sendrecv)
    +                timelineStep__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "timeline_step"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional timelineStep)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 2, enableRecvScheduling__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, optimizerOptions__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, buildCostModel__field_descriptor),
    +                    (Data.ProtoLens.Tag 9, buildCostModelAfter__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, inferShapes__field_descriptor),
    +                    (Data.ProtoLens.Tag 6, placePrunedGraph__field_descriptor),
    +                    (Data.ProtoLens.Tag 7, enableBfloat16Sendrecv__field_descriptor),
    +                    (Data.ProtoLens.Tag 8, timelineStep__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("enable_recv_scheduling",
    +                     enableRecvScheduling__field_descriptor),
    +                    ("optimizer_options", optimizerOptions__field_descriptor),
    +                    ("build_cost_model", buildCostModel__field_descriptor),
    +                    ("build_cost_model_after", buildCostModelAfter__field_descriptor),
    +                    ("infer_shapes", inferShapes__field_descriptor),
    +                    ("place_pruned_graph", placePrunedGraph__field_descriptor),
    +                    ("enable_bfloat16_sendrecv",
    +                     enableBfloat16Sendrecv__field_descriptor),
    +                    ("timeline_step", timelineStep__field_descriptor)])
    +
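-- Optional submessages nest, so lenses compose straight through from
-- ConfigProto; a sketch (optimizerOptions and doConstantFolding are
-- the accessors generated alongside OptimizerOptions below):
--
--   folded :: ConfigProto
--   folded = Lens.Family2.set
--              (graphOptions Prelude.. optimizerOptions Prelude..
--                 doConstantFolding)
--              Prelude.True
--              Data.Default.Class.def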
    +data OptimizerOptions = OptimizerOptions{_OptimizerOptions'doCommonSubexpressionElimination
    +                                         :: Prelude.Bool,
    +                                         _OptimizerOptions'doConstantFolding :: Prelude.Bool,
    +                                         _OptimizerOptions'doFunctionInlining :: Prelude.Bool,
    +                                         _OptimizerOptions'optLevel :: OptimizerOptions'Level}
    +                      deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance
    +     Data.ProtoLens.Field "doCommonSubexpressionElimination"
    +       OptimizerOptions
    +     = Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "doCommonSubexpressionElimination"
    +         OptimizerOptions OptimizerOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens
    +              _OptimizerOptions'doCommonSubexpressionElimination
    +              (\ x__ y__ ->
    +                 x__{_OptimizerOptions'doCommonSubexpressionElimination = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "doConstantFolding" OptimizerOptions =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "doConstantFolding"
    +         OptimizerOptions OptimizerOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OptimizerOptions'doConstantFolding
    +              (\ x__ y__ -> x__{_OptimizerOptions'doConstantFolding = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "doFunctionInlining" OptimizerOptions =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "doFunctionInlining"
    +         OptimizerOptions OptimizerOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OptimizerOptions'doFunctionInlining
    +              (\ x__ y__ -> x__{_OptimizerOptions'doFunctionInlining = y__})
    +
    +type instance Data.ProtoLens.Field "optLevel" OptimizerOptions =
    +     OptimizerOptions'Level
    +
    +instance Data.ProtoLens.HasField "optLevel" OptimizerOptions
    +         OptimizerOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _OptimizerOptions'optLevel
    +              (\ x__ y__ -> x__{_OptimizerOptions'optLevel = y__})
    +
    +instance Data.Default.Class.Default OptimizerOptions where
    +        def
    +          = OptimizerOptions{_OptimizerOptions'doCommonSubexpressionElimination
    +                               = Data.ProtoLens.fieldDefault,
    +                             _OptimizerOptions'doConstantFolding = Data.ProtoLens.fieldDefault,
    +                             _OptimizerOptions'doFunctionInlining = Data.ProtoLens.fieldDefault,
    +                             _OptimizerOptions'optLevel = Data.Default.Class.def}
    +
    +instance Data.ProtoLens.Message OptimizerOptions where
    +        descriptor
    +          = let doCommonSubexpressionElimination__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor
    +                      "do_common_subexpression_elimination"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         doCommonSubexpressionElimination)
    +                doConstantFolding__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "do_constant_folding"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         doConstantFolding)
    +                doFunctionInlining__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "do_function_inlining"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         doFunctionInlining)
    +                optLevel__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "opt_level"
    +                      (Data.ProtoLens.EnumField ::
    +                         Data.ProtoLens.FieldTypeDescriptor OptimizerOptions'Level)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional optLevel)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1,
    +                     doCommonSubexpressionElimination__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, doConstantFolding__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, doFunctionInlining__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, optLevel__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("do_common_subexpression_elimination",
    +                     doCommonSubexpressionElimination__field_descriptor),
    +                    ("do_constant_folding", doConstantFolding__field_descriptor),
    +                    ("do_function_inlining", doFunctionInlining__field_descriptor),
    +                    ("opt_level", optLevel__field_descriptor)])
    +
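+-- A usage sketch (assuming Lens.Family2's set and view are in scope, and
+-- using the overloaded lenses defined at the bottom of this module): fields
+-- are read and updated through the HasField instances above.
+--
+-- > ghci> let opts = Lens.Family2.set doConstantFolding Prelude.True (Data.Default.Class.def :: OptimizerOptions)
+-- > ghci> Lens.Family2.view doConstantFolding opts
+-- > True
+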
    +data OptimizerOptions'Level = OptimizerOptions'L0
    +                            | OptimizerOptions'L1
    +                            deriving (Prelude.Show, Prelude.Eq)
    +
    +instance Data.Default.Class.Default OptimizerOptions'Level where
    +        def = OptimizerOptions'L0
    +
    +instance Data.ProtoLens.FieldDefault OptimizerOptions'Level where
    +        fieldDefault = OptimizerOptions'L0
    +
    +instance Data.ProtoLens.MessageEnum OptimizerOptions'Level where
    +        maybeToEnum (-1) = Prelude.Just OptimizerOptions'L0
    +        maybeToEnum 0 = Prelude.Just OptimizerOptions'L1
    +        maybeToEnum _ = Prelude.Nothing
    +        showEnum OptimizerOptions'L0 = "L0"
    +        showEnum OptimizerOptions'L1 = "L1"
    +        readEnum "L0" = Prelude.Just OptimizerOptions'L0
    +        readEnum "L1" = Prelude.Just OptimizerOptions'L1
    +        readEnum _ = Prelude.Nothing
    +
    +instance Prelude.Enum OptimizerOptions'Level where
    +        toEnum k__
    +          = Prelude.maybe
    +              (Prelude.error
    +                 ((Prelude.++) "toEnum: unknown value for enum Level: "
    +                    (Prelude.show k__)))
    +              Prelude.id
    +              (Data.ProtoLens.maybeToEnum k__)
    +        fromEnum OptimizerOptions'L0 = -1
    +        fromEnum OptimizerOptions'L1 = 0
    +        succ OptimizerOptions'L1
    +          = Prelude.error
+              "OptimizerOptions'Level.succ: bad argument OptimizerOptions'L1. This value would be out of bounds."
    +        succ OptimizerOptions'L0 = OptimizerOptions'L1
    +        pred OptimizerOptions'L0
    +          = Prelude.error
+              "OptimizerOptions'Level.pred: bad argument OptimizerOptions'L0. This value would be out of bounds."
    +        pred OptimizerOptions'L1 = OptimizerOptions'L0
    +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
    +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
    +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
    +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
    +
    +instance Prelude.Bounded OptimizerOptions'Level where
    +        minBound = OptimizerOptions'L0
    +        maxBound = OptimizerOptions'L1
    +
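+-- Note the wire numbering fixed by fromEnum above: L1 is 0 and L0 is -1,
+-- matching tensorflow's config.proto. A sketch of the decoding side:
+--
+-- > ghci> Data.ProtoLens.maybeToEnum (-1) :: Prelude.Maybe OptimizerOptions'Level
+-- > Just OptimizerOptions'L0
+-- > ghci> Data.ProtoLens.maybeToEnum 7 :: Prelude.Maybe OptimizerOptions'Level
+-- > Nothing
+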
    +data RunMetadata = RunMetadata{_RunMetadata'stepStats ::
    +                               Prelude.Maybe Proto.Tensorflow.Core.Framework.StepStats.StepStats,
    +                               _RunMetadata'costGraph ::
    +                               Prelude.Maybe
    +                                 Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef,
    +                               _RunMetadata'partitionGraphs ::
    +                               [Proto.Tensorflow.Core.Framework.Graph.GraphDef]}
    +                 deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "stepStats" RunMetadata =
    +     Proto.Tensorflow.Core.Framework.StepStats.StepStats
    +
    +instance Data.ProtoLens.HasField "stepStats" RunMetadata
    +         RunMetadata where
    +        field _
    +          = (Prelude..) maybe'stepStats
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'stepStats" RunMetadata =
    +     Prelude.Maybe Proto.Tensorflow.Core.Framework.StepStats.StepStats
    +
    +instance Data.ProtoLens.HasField "maybe'stepStats" RunMetadata
    +         RunMetadata where
    +        field _
    +          = Lens.Family2.Unchecked.lens _RunMetadata'stepStats
    +              (\ x__ y__ -> x__{_RunMetadata'stepStats = y__})
    +
    +type instance Data.ProtoLens.Field "costGraph" RunMetadata =
    +     Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef
    +
    +instance Data.ProtoLens.HasField "costGraph" RunMetadata
    +         RunMetadata where
    +        field _
    +          = (Prelude..) maybe'costGraph
    +              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    +
    +type instance Data.ProtoLens.Field "maybe'costGraph" RunMetadata =
    +     Prelude.Maybe
    +       Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef
    +
    +instance Data.ProtoLens.HasField "maybe'costGraph" RunMetadata
    +         RunMetadata where
    +        field _
    +          = Lens.Family2.Unchecked.lens _RunMetadata'costGraph
    +              (\ x__ y__ -> x__{_RunMetadata'costGraph = y__})
    +
    +type instance Data.ProtoLens.Field "partitionGraphs" RunMetadata =
    +     [Proto.Tensorflow.Core.Framework.Graph.GraphDef]
    +
    +instance Data.ProtoLens.HasField "partitionGraphs" RunMetadata
    +         RunMetadata where
    +        field _
    +          = Lens.Family2.Unchecked.lens _RunMetadata'partitionGraphs
    +              (\ x__ y__ -> x__{_RunMetadata'partitionGraphs = y__})
    +
    +instance Data.Default.Class.Default RunMetadata where
    +        def
    +          = RunMetadata{_RunMetadata'stepStats = Prelude.Nothing,
    +                        _RunMetadata'costGraph = Prelude.Nothing,
    +                        _RunMetadata'partitionGraphs = []}
    +
    +instance Data.ProtoLens.Message RunMetadata where
    +        descriptor
    +          = let stepStats__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "step_stats"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.StepStats.StepStats)
    +                      (Data.ProtoLens.OptionalField maybe'stepStats)
    +                costGraph__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "cost_graph"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef)
    +                      (Data.ProtoLens.OptionalField maybe'costGraph)
    +                partitionGraphs__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "partition_graphs"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor
    +                           Proto.Tensorflow.Core.Framework.Graph.GraphDef)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    +                         partitionGraphs)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, stepStats__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, costGraph__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, partitionGraphs__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("step_stats", stepStats__field_descriptor),
    +                    ("cost_graph", costGraph__field_descriptor),
    +                    ("partition_graphs", partitionGraphs__field_descriptor)])
    +
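+-- The paired lenses above differ in how they treat an unset submessage:
+-- stepStats substitutes def for Nothing via maybeLens, while maybe'stepStats
+-- exposes the Maybe itself. A sketch, assuming Lens.Family2.view:
+--
+-- > ghci> Lens.Family2.view maybe'stepStats (Data.Default.Class.def :: RunMetadata)
+-- > Nothing
+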
    +data RunOptions = RunOptions{_RunOptions'traceLevel ::
    +                             RunOptions'TraceLevel,
    +                             _RunOptions'timeoutInMs :: Data.Int.Int64,
    +                             _RunOptions'interOpThreadPool :: Data.Int.Int32,
    +                             _RunOptions'debugTensorWatchOpts :: [DebugTensorWatch],
    +                             _RunOptions'outputPartitionGraphs :: Prelude.Bool}
    +                deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance Data.ProtoLens.Field "traceLevel" RunOptions =
    +     RunOptions'TraceLevel
    +
    +instance Data.ProtoLens.HasField "traceLevel" RunOptions RunOptions
    +         where
    +        field _
    +          = Lens.Family2.Unchecked.lens _RunOptions'traceLevel
    +              (\ x__ y__ -> x__{_RunOptions'traceLevel = y__})
    +
    +type instance Data.ProtoLens.Field "timeoutInMs" RunOptions =
    +     Data.Int.Int64
    +
    +instance Data.ProtoLens.HasField "timeoutInMs" RunOptions
    +         RunOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _RunOptions'timeoutInMs
    +              (\ x__ y__ -> x__{_RunOptions'timeoutInMs = y__})
    +
    +type instance Data.ProtoLens.Field "interOpThreadPool" RunOptions =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "interOpThreadPool" RunOptions
    +         RunOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _RunOptions'interOpThreadPool
    +              (\ x__ y__ -> x__{_RunOptions'interOpThreadPool = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "debugTensorWatchOpts" RunOptions =
    +     [DebugTensorWatch]
    +
    +instance Data.ProtoLens.HasField "debugTensorWatchOpts" RunOptions
    +         RunOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _RunOptions'debugTensorWatchOpts
    +              (\ x__ y__ -> x__{_RunOptions'debugTensorWatchOpts = y__})
    +
    +type instance
    +     Data.ProtoLens.Field "outputPartitionGraphs" RunOptions =
    +     Prelude.Bool
    +
    +instance Data.ProtoLens.HasField "outputPartitionGraphs" RunOptions
    +         RunOptions where
    +        field _
    +          = Lens.Family2.Unchecked.lens _RunOptions'outputPartitionGraphs
    +              (\ x__ y__ -> x__{_RunOptions'outputPartitionGraphs = y__})
    +
    +instance Data.Default.Class.Default RunOptions where
    +        def
    +          = RunOptions{_RunOptions'traceLevel = Data.Default.Class.def,
    +                       _RunOptions'timeoutInMs = Data.ProtoLens.fieldDefault,
    +                       _RunOptions'interOpThreadPool = Data.ProtoLens.fieldDefault,
    +                       _RunOptions'debugTensorWatchOpts = [],
    +                       _RunOptions'outputPartitionGraphs = Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message RunOptions where
    +        descriptor
    +          = let traceLevel__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "trace_level"
    +                      (Data.ProtoLens.EnumField ::
    +                         Data.ProtoLens.FieldTypeDescriptor RunOptions'TraceLevel)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional traceLevel)
    +                timeoutInMs__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "timeout_in_ms"
    +                      (Data.ProtoLens.Int64Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional timeoutInMs)
    +                interOpThreadPool__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "inter_op_thread_pool"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         interOpThreadPool)
    +                debugTensorWatchOpts__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "debug_tensor_watch_opts"
    +                      (Data.ProtoLens.MessageField ::
    +                         Data.ProtoLens.FieldTypeDescriptor DebugTensorWatch)
    +                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    +                         debugTensorWatchOpts)
    +                outputPartitionGraphs__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "output_partition_graphs"
    +                      (Data.ProtoLens.BoolField ::
    +                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    +                         outputPartitionGraphs)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, traceLevel__field_descriptor),
    +                    (Data.ProtoLens.Tag 2, timeoutInMs__field_descriptor),
    +                    (Data.ProtoLens.Tag 3, interOpThreadPool__field_descriptor),
    +                    (Data.ProtoLens.Tag 4, debugTensorWatchOpts__field_descriptor),
    +                    (Data.ProtoLens.Tag 5, outputPartitionGraphs__field_descriptor)])
    +                (Data.Map.fromList
    +                   [("trace_level", traceLevel__field_descriptor),
    +                    ("timeout_in_ms", timeoutInMs__field_descriptor),
    +                    ("inter_op_thread_pool", interOpThreadPool__field_descriptor),
    +                    ("debug_tensor_watch_opts",
    +                     debugTensorWatchOpts__field_descriptor),
    +                    ("output_partition_graphs",
    +                     outputPartitionGraphs__field_descriptor)])
    +
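+-- A construction sketch for RunOptions, again assuming Lens.Family2.set and
+-- Lens.Family2.view; unset scalar fields keep their proto defaults:
+--
+-- > ghci> let ro = Lens.Family2.set traceLevel RunOptions'FULL_TRACE (Data.Default.Class.def :: RunOptions)
+-- > ghci> Lens.Family2.view timeoutInMs ro
+-- > 0
+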
    +data RunOptions'TraceLevel = RunOptions'NO_TRACE
    +                           | RunOptions'SOFTWARE_TRACE
    +                           | RunOptions'HARDWARE_TRACE
    +                           | RunOptions'FULL_TRACE
    +                           deriving (Prelude.Show, Prelude.Eq)
    +
    +instance Data.Default.Class.Default RunOptions'TraceLevel where
    +        def = RunOptions'NO_TRACE
    +
    +instance Data.ProtoLens.FieldDefault RunOptions'TraceLevel where
    +        fieldDefault = RunOptions'NO_TRACE
    +
    +instance Data.ProtoLens.MessageEnum RunOptions'TraceLevel where
    +        maybeToEnum 0 = Prelude.Just RunOptions'NO_TRACE
    +        maybeToEnum 1 = Prelude.Just RunOptions'SOFTWARE_TRACE
    +        maybeToEnum 2 = Prelude.Just RunOptions'HARDWARE_TRACE
    +        maybeToEnum 3 = Prelude.Just RunOptions'FULL_TRACE
    +        maybeToEnum _ = Prelude.Nothing
    +        showEnum RunOptions'NO_TRACE = "NO_TRACE"
    +        showEnum RunOptions'SOFTWARE_TRACE = "SOFTWARE_TRACE"
    +        showEnum RunOptions'HARDWARE_TRACE = "HARDWARE_TRACE"
    +        showEnum RunOptions'FULL_TRACE = "FULL_TRACE"
    +        readEnum "NO_TRACE" = Prelude.Just RunOptions'NO_TRACE
    +        readEnum "SOFTWARE_TRACE" = Prelude.Just RunOptions'SOFTWARE_TRACE
    +        readEnum "HARDWARE_TRACE" = Prelude.Just RunOptions'HARDWARE_TRACE
    +        readEnum "FULL_TRACE" = Prelude.Just RunOptions'FULL_TRACE
    +        readEnum _ = Prelude.Nothing
    +
    +instance Prelude.Enum RunOptions'TraceLevel where
    +        toEnum k__
    +          = Prelude.maybe
    +              (Prelude.error
    +                 ((Prelude.++) "toEnum: unknown value for enum TraceLevel: "
    +                    (Prelude.show k__)))
    +              Prelude.id
    +              (Data.ProtoLens.maybeToEnum k__)
    +        fromEnum RunOptions'NO_TRACE = 0
    +        fromEnum RunOptions'SOFTWARE_TRACE = 1
    +        fromEnum RunOptions'HARDWARE_TRACE = 2
    +        fromEnum RunOptions'FULL_TRACE = 3
    +        succ RunOptions'FULL_TRACE
    +          = Prelude.error
+              "RunOptions'TraceLevel.succ: bad argument RunOptions'FULL_TRACE. This value would be out of bounds."
    +        succ RunOptions'NO_TRACE = RunOptions'SOFTWARE_TRACE
    +        succ RunOptions'SOFTWARE_TRACE = RunOptions'HARDWARE_TRACE
    +        succ RunOptions'HARDWARE_TRACE = RunOptions'FULL_TRACE
    +        pred RunOptions'NO_TRACE
    +          = Prelude.error
+              "RunOptions'TraceLevel.pred: bad argument RunOptions'NO_TRACE. This value would be out of bounds."
    +        pred RunOptions'SOFTWARE_TRACE = RunOptions'NO_TRACE
    +        pred RunOptions'HARDWARE_TRACE = RunOptions'SOFTWARE_TRACE
    +        pred RunOptions'FULL_TRACE = RunOptions'HARDWARE_TRACE
    +        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
    +        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
    +        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
    +        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
    +
    +instance Prelude.Bounded RunOptions'TraceLevel where
    +        minBound = RunOptions'NO_TRACE
    +        maxBound = RunOptions'FULL_TRACE
    +
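+-- showEnum and readEnum above use the proto value names rather than the
+-- Haskell constructor names; a round-trip sketch:
+--
+-- > ghci> Data.ProtoLens.showEnum RunOptions'HARDWARE_TRACE
+-- > "HARDWARE_TRACE"
+-- > ghci> Data.ProtoLens.readEnum "HARDWARE_TRACE" :: Prelude.Maybe RunOptions'TraceLevel
+-- > Just RunOptions'HARDWARE_TRACE
+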
    +data ThreadPoolOptionProto = ThreadPoolOptionProto{_ThreadPoolOptionProto'numThreads
    +                                                   :: Data.Int.Int32}
    +                           deriving (Prelude.Show, Prelude.Eq)
    +
    +type instance
    +     Data.ProtoLens.Field "numThreads" ThreadPoolOptionProto =
    +     Data.Int.Int32
    +
    +instance Data.ProtoLens.HasField "numThreads" ThreadPoolOptionProto
    +         ThreadPoolOptionProto where
    +        field _
    +          = Lens.Family2.Unchecked.lens _ThreadPoolOptionProto'numThreads
    +              (\ x__ y__ -> x__{_ThreadPoolOptionProto'numThreads = y__})
    +
    +instance Data.Default.Class.Default ThreadPoolOptionProto where
    +        def
    +          = ThreadPoolOptionProto{_ThreadPoolOptionProto'numThreads =
    +                                    Data.ProtoLens.fieldDefault}
    +
    +instance Data.ProtoLens.Message ThreadPoolOptionProto where
    +        descriptor
    +          = let numThreads__field_descriptor
    +                  = Data.ProtoLens.FieldDescriptor "num_threads"
    +                      (Data.ProtoLens.Int32Field ::
    +                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    +                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numThreads)
    +              in
    +              Data.ProtoLens.MessageDescriptor
    +                (Data.Map.fromList
    +                   [(Data.ProtoLens.Tag 1, numThreads__field_descriptor)])
    +                (Data.Map.fromList [("num_threads", numThreads__field_descriptor)])
    +
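+-- Since ThreadPoolOptionProto has a Message instance, it round trips through
+-- the wire format; a sketch, assuming Data.ProtoLens.encodeMessage and
+-- Data.ProtoLens.decodeMessage:
+--
+-- > ghci> let tp = Lens.Family2.set numThreads 4 (Data.Default.Class.def :: ThreadPoolOptionProto)
+-- > ghci> Data.ProtoLens.decodeMessage (Data.ProtoLens.encodeMessage tp) Prelude.== Prelude.Right tp
+-- > True
+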
    +allocatorType ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "allocatorType" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "allocatorType" msg)
    +                  (Data.ProtoLens.Field "allocatorType" msg')
    +allocatorType
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "allocatorType")
    +
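+-- allocatorType above is typical of every definition in this block: a single
+-- overloaded lens whose ProxySym symbol names the field, so one definition
+-- serves every message with the matching HasField instance. A sketch:
+--
+-- > ghci> Lens.Family2.view numThreads (Data.Default.Class.def :: ThreadPoolOptionProto)
+-- > 0
+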
    +allowGrowth ::
    +            forall msg msg' . Data.ProtoLens.HasField "allowGrowth" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "allowGrowth" msg)
    +                (Data.ProtoLens.Field "allowGrowth" msg')
    +allowGrowth
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "allowGrowth")
    +
    +allowSoftPlacement ::
    +                   forall msg msg' .
    +                     Data.ProtoLens.HasField "allowSoftPlacement" msg msg' =>
    +                     Lens.Family2.Lens msg msg'
    +                       (Data.ProtoLens.Field "allowSoftPlacement" msg)
    +                       (Data.ProtoLens.Field "allowSoftPlacement" msg')
    +allowSoftPlacement
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "allowSoftPlacement")
    +
    +buildCostModel ::
    +               forall msg msg' .
    +                 Data.ProtoLens.HasField "buildCostModel" msg msg' =>
    +                 Lens.Family2.Lens msg msg'
    +                   (Data.ProtoLens.Field "buildCostModel" msg)
    +                   (Data.ProtoLens.Field "buildCostModel" msg')
    +buildCostModel
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "buildCostModel")
    +
    +buildCostModelAfter ::
    +                    forall msg msg' .
    +                      Data.ProtoLens.HasField "buildCostModelAfter" msg msg' =>
    +                      Lens.Family2.Lens msg msg'
    +                        (Data.ProtoLens.Field "buildCostModelAfter" msg)
    +                        (Data.ProtoLens.Field "buildCostModelAfter" msg')
    +buildCostModelAfter
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "buildCostModelAfter")
    +
    +costGraph ::
    +          forall msg msg' . Data.ProtoLens.HasField "costGraph" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "costGraph" msg)
    +              (Data.ProtoLens.Field "costGraph" msg')
    +costGraph
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "costGraph")
    +
    +debugOps ::
    +         forall msg msg' . Data.ProtoLens.HasField "debugOps" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "debugOps" msg)
    +             (Data.ProtoLens.Field "debugOps" msg')
    +debugOps
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "debugOps")
    +
    +debugTensorWatchOpts ::
    +                     forall msg msg' .
    +                       Data.ProtoLens.HasField "debugTensorWatchOpts" msg msg' =>
    +                       Lens.Family2.Lens msg msg'
    +                         (Data.ProtoLens.Field "debugTensorWatchOpts" msg)
    +                         (Data.ProtoLens.Field "debugTensorWatchOpts" msg')
    +debugTensorWatchOpts
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "debugTensorWatchOpts")
    +
    +debugUrls ::
    +          forall msg msg' . Data.ProtoLens.HasField "debugUrls" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "debugUrls" msg)
    +              (Data.ProtoLens.Field "debugUrls" msg')
    +debugUrls
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "debugUrls")
    +
    +deferredDeletionBytes ::
    +                      forall msg msg' .
    +                        Data.ProtoLens.HasField "deferredDeletionBytes" msg msg' =>
    +                        Lens.Family2.Lens msg msg'
    +                          (Data.ProtoLens.Field "deferredDeletionBytes" msg)
    +                          (Data.ProtoLens.Field "deferredDeletionBytes" msg')
    +deferredDeletionBytes
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "deferredDeletionBytes")
    +
    +deviceCount ::
    +            forall msg msg' . Data.ProtoLens.HasField "deviceCount" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "deviceCount" msg)
    +                (Data.ProtoLens.Field "deviceCount" msg')
    +deviceCount
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "deviceCount")
    +
    +deviceFilters ::
    +              forall msg msg' .
    +                Data.ProtoLens.HasField "deviceFilters" msg msg' =>
    +                Lens.Family2.Lens msg msg'
    +                  (Data.ProtoLens.Field "deviceFilters" msg)
    +                  (Data.ProtoLens.Field "deviceFilters" msg')
    +deviceFilters
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "deviceFilters")
    +
    +doCommonSubexpressionElimination ::
    +                                 forall msg msg' .
    +                                   Data.ProtoLens.HasField "doCommonSubexpressionElimination" msg
    +                                     msg' =>
    +                                   Lens.Family2.Lens msg msg'
    +                                     (Data.ProtoLens.Field "doCommonSubexpressionElimination" msg)
    +                                     (Data.ProtoLens.Field "doCommonSubexpressionElimination" msg')
    +doCommonSubexpressionElimination
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "doCommonSubexpressionElimination")
    +
    +doConstantFolding ::
    +                  forall msg msg' .
    +                    Data.ProtoLens.HasField "doConstantFolding" msg msg' =>
    +                    Lens.Family2.Lens msg msg'
    +                      (Data.ProtoLens.Field "doConstantFolding" msg)
    +                      (Data.ProtoLens.Field "doConstantFolding" msg')
    +doConstantFolding
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "doConstantFolding")
    +
    +doFunctionInlining ::
    +                   forall msg msg' .
    +                     Data.ProtoLens.HasField "doFunctionInlining" msg msg' =>
    +                     Lens.Family2.Lens msg msg'
    +                       (Data.ProtoLens.Field "doFunctionInlining" msg)
    +                       (Data.ProtoLens.Field "doFunctionInlining" msg')
    +doFunctionInlining
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "doFunctionInlining")
    +
    +enableBfloat16Sendrecv ::
    +                       forall msg msg' .
    +                         Data.ProtoLens.HasField "enableBfloat16Sendrecv" msg msg' =>
    +                         Lens.Family2.Lens msg msg'
    +                           (Data.ProtoLens.Field "enableBfloat16Sendrecv" msg)
    +                           (Data.ProtoLens.Field "enableBfloat16Sendrecv" msg')
    +enableBfloat16Sendrecv
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "enableBfloat16Sendrecv")
    +
    +enableRecvScheduling ::
    +                     forall msg msg' .
    +                       Data.ProtoLens.HasField "enableRecvScheduling" msg msg' =>
    +                       Lens.Family2.Lens msg msg'
    +                         (Data.ProtoLens.Field "enableRecvScheduling" msg)
    +                         (Data.ProtoLens.Field "enableRecvScheduling" msg')
    +enableRecvScheduling
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "enableRecvScheduling")
    +
    +gpuOptions ::
    +           forall msg msg' . Data.ProtoLens.HasField "gpuOptions" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "gpuOptions" msg)
    +               (Data.ProtoLens.Field "gpuOptions" msg')
    +gpuOptions
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "gpuOptions")
    +
    +graphOptions ::
    +             forall msg msg' .
    +               Data.ProtoLens.HasField "graphOptions" msg msg' =>
    +               Lens.Family2.Lens msg msg'
    +                 (Data.ProtoLens.Field "graphOptions" msg)
    +                 (Data.ProtoLens.Field "graphOptions" msg')
    +graphOptions
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "graphOptions")
    +
    +inferShapes ::
    +            forall msg msg' . Data.ProtoLens.HasField "inferShapes" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "inferShapes" msg)
    +                (Data.ProtoLens.Field "inferShapes" msg')
    +inferShapes
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "inferShapes")
    +
    +interOpParallelismThreads ::
    +                          forall msg msg' .
    +                            Data.ProtoLens.HasField "interOpParallelismThreads" msg msg' =>
    +                            Lens.Family2.Lens msg msg'
    +                              (Data.ProtoLens.Field "interOpParallelismThreads" msg)
    +                              (Data.ProtoLens.Field "interOpParallelismThreads" msg')
    +interOpParallelismThreads
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "interOpParallelismThreads")
    +
    +interOpThreadPool ::
    +                  forall msg msg' .
    +                    Data.ProtoLens.HasField "interOpThreadPool" msg msg' =>
    +                    Lens.Family2.Lens msg msg'
    +                      (Data.ProtoLens.Field "interOpThreadPool" msg)
    +                      (Data.ProtoLens.Field "interOpThreadPool" msg')
    +interOpThreadPool
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "interOpThreadPool")
    +
    +intraOpParallelismThreads ::
    +                          forall msg msg' .
    +                            Data.ProtoLens.HasField "intraOpParallelismThreads" msg msg' =>
    +                            Lens.Family2.Lens msg msg'
    +                              (Data.ProtoLens.Field "intraOpParallelismThreads" msg)
    +                              (Data.ProtoLens.Field "intraOpParallelismThreads" msg')
    +intraOpParallelismThreads
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "intraOpParallelismThreads")
    +
    +key ::
    +    forall msg msg' . Data.ProtoLens.HasField "key" msg msg' =>
    +      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "key" msg)
    +        (Data.ProtoLens.Field "key" msg')
    +key
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "key")
    +
    +logDevicePlacement ::
    +                   forall msg msg' .
    +                     Data.ProtoLens.HasField "logDevicePlacement" msg msg' =>
    +                     Lens.Family2.Lens msg msg'
    +                       (Data.ProtoLens.Field "logDevicePlacement" msg)
    +                       (Data.ProtoLens.Field "logDevicePlacement" msg')
    +logDevicePlacement
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "logDevicePlacement")
    +
    +maybe'costGraph ::
    +                forall msg msg' .
    +                  Data.ProtoLens.HasField "maybe'costGraph" msg msg' =>
    +                  Lens.Family2.Lens msg msg'
    +                    (Data.ProtoLens.Field "maybe'costGraph" msg)
    +                    (Data.ProtoLens.Field "maybe'costGraph" msg')
    +maybe'costGraph
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'costGraph")
    +
    +maybe'gpuOptions ::
    +                 forall msg msg' .
    +                   Data.ProtoLens.HasField "maybe'gpuOptions" msg msg' =>
    +                   Lens.Family2.Lens msg msg'
    +                     (Data.ProtoLens.Field "maybe'gpuOptions" msg)
    +                     (Data.ProtoLens.Field "maybe'gpuOptions" msg')
    +maybe'gpuOptions
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'gpuOptions")
    +
    +maybe'graphOptions ::
    +                   forall msg msg' .
    +                     Data.ProtoLens.HasField "maybe'graphOptions" msg msg' =>
    +                     Lens.Family2.Lens msg msg'
    +                       (Data.ProtoLens.Field "maybe'graphOptions" msg)
    +                       (Data.ProtoLens.Field "maybe'graphOptions" msg')
    +maybe'graphOptions
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'graphOptions")
    +
    +maybe'optimizerOptions ::
    +                       forall msg msg' .
    +                         Data.ProtoLens.HasField "maybe'optimizerOptions" msg msg' =>
    +                         Lens.Family2.Lens msg msg'
    +                           (Data.ProtoLens.Field "maybe'optimizerOptions" msg)
    +                           (Data.ProtoLens.Field "maybe'optimizerOptions" msg')
    +maybe'optimizerOptions
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'optimizerOptions")
    +
    +maybe'stepStats ::
    +                forall msg msg' .
    +                  Data.ProtoLens.HasField "maybe'stepStats" msg msg' =>
    +                  Lens.Family2.Lens msg msg'
    +                    (Data.ProtoLens.Field "maybe'stepStats" msg)
    +                    (Data.ProtoLens.Field "maybe'stepStats" msg')
    +maybe'stepStats
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "maybe'stepStats")
    +
    +nodeName ::
    +         forall msg msg' . Data.ProtoLens.HasField "nodeName" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "nodeName" msg)
    +             (Data.ProtoLens.Field "nodeName" msg')
    +nodeName
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "nodeName")
    +
    +numThreads ::
    +           forall msg msg' . Data.ProtoLens.HasField "numThreads" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "numThreads" msg)
    +               (Data.ProtoLens.Field "numThreads" msg')
    +numThreads
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "numThreads")
    +
    +operationTimeoutInMs ::
    +                     forall msg msg' .
    +                       Data.ProtoLens.HasField "operationTimeoutInMs" msg msg' =>
    +                       Lens.Family2.Lens msg msg'
    +                         (Data.ProtoLens.Field "operationTimeoutInMs" msg)
    +                         (Data.ProtoLens.Field "operationTimeoutInMs" msg')
    +operationTimeoutInMs
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "operationTimeoutInMs")
    +
    +optLevel ::
    +         forall msg msg' . Data.ProtoLens.HasField "optLevel" msg msg' =>
    +           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "optLevel" msg)
    +             (Data.ProtoLens.Field "optLevel" msg')
    +optLevel
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "optLevel")
    +
    +optimizerOptions ::
    +                 forall msg msg' .
    +                   Data.ProtoLens.HasField "optimizerOptions" msg msg' =>
    +                   Lens.Family2.Lens msg msg'
    +                     (Data.ProtoLens.Field "optimizerOptions" msg)
    +                     (Data.ProtoLens.Field "optimizerOptions" msg')
    +optimizerOptions
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "optimizerOptions")
    +
    +outputPartitionGraphs ::
    +                      forall msg msg' .
    +                        Data.ProtoLens.HasField "outputPartitionGraphs" msg msg' =>
    +                        Lens.Family2.Lens msg msg'
    +                          (Data.ProtoLens.Field "outputPartitionGraphs" msg)
    +                          (Data.ProtoLens.Field "outputPartitionGraphs" msg')
    +outputPartitionGraphs
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "outputPartitionGraphs")
    +
    +outputSlot ::
    +           forall msg msg' . Data.ProtoLens.HasField "outputSlot" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "outputSlot" msg)
    +               (Data.ProtoLens.Field "outputSlot" msg')
    +outputSlot
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "outputSlot")
    +
    +partitionGraphs ::
    +                forall msg msg' .
    +                  Data.ProtoLens.HasField "partitionGraphs" msg msg' =>
    +                  Lens.Family2.Lens msg msg'
    +                    (Data.ProtoLens.Field "partitionGraphs" msg)
    +                    (Data.ProtoLens.Field "partitionGraphs" msg')
    +partitionGraphs
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "partitionGraphs")
    +
    +perProcessGpuMemoryFraction ::
    +                            forall msg msg' .
    +                              Data.ProtoLens.HasField "perProcessGpuMemoryFraction" msg msg' =>
    +                              Lens.Family2.Lens msg msg'
    +                                (Data.ProtoLens.Field "perProcessGpuMemoryFraction" msg)
    +                                (Data.ProtoLens.Field "perProcessGpuMemoryFraction" msg')
    +perProcessGpuMemoryFraction
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "perProcessGpuMemoryFraction")
    +
    +placePrunedGraph ::
    +                 forall msg msg' .
    +                   Data.ProtoLens.HasField "placePrunedGraph" msg msg' =>
    +                   Lens.Family2.Lens msg msg'
    +                     (Data.ProtoLens.Field "placePrunedGraph" msg)
    +                     (Data.ProtoLens.Field "placePrunedGraph" msg')
    +placePrunedGraph
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "placePrunedGraph")
    +
    +placementPeriod ::
    +                forall msg msg' .
    +                  Data.ProtoLens.HasField "placementPeriod" msg msg' =>
    +                  Lens.Family2.Lens msg msg'
    +                    (Data.ProtoLens.Field "placementPeriod" msg)
    +                    (Data.ProtoLens.Field "placementPeriod" msg')
    +placementPeriod
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "placementPeriod")
    +
    +sessionInterOpThreadPool ::
    +                         forall msg msg' .
    +                           Data.ProtoLens.HasField "sessionInterOpThreadPool" msg msg' =>
    +                           Lens.Family2.Lens msg msg'
    +                             (Data.ProtoLens.Field "sessionInterOpThreadPool" msg)
    +                             (Data.ProtoLens.Field "sessionInterOpThreadPool" msg')
    +sessionInterOpThreadPool
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "sessionInterOpThreadPool")
    +
    +stepStats ::
    +          forall msg msg' . Data.ProtoLens.HasField "stepStats" msg msg' =>
    +            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "stepStats" msg)
    +              (Data.ProtoLens.Field "stepStats" msg')
    +stepStats
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "stepStats")
    +
    +timelineStep ::
    +             forall msg msg' .
    +               Data.ProtoLens.HasField "timelineStep" msg msg' =>
    +               Lens.Family2.Lens msg msg'
    +                 (Data.ProtoLens.Field "timelineStep" msg)
    +                 (Data.ProtoLens.Field "timelineStep" msg')
    +timelineStep
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "timelineStep")
    +
    +timeoutInMs ::
    +            forall msg msg' . Data.ProtoLens.HasField "timeoutInMs" msg msg' =>
    +              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "timeoutInMs" msg)
    +                (Data.ProtoLens.Field "timeoutInMs" msg')
    +timeoutInMs
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "timeoutInMs")
    +
    +traceLevel ::
    +           forall msg msg' . Data.ProtoLens.HasField "traceLevel" msg msg' =>
    +             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "traceLevel" msg)
    +               (Data.ProtoLens.Field "traceLevel" msg')
    +traceLevel
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "traceLevel")
    +
    +usePerSessionThreads ::
    +                     forall msg msg' .
    +                       Data.ProtoLens.HasField "usePerSessionThreads" msg msg' =>
    +                       Lens.Family2.Lens msg msg'
    +                         (Data.ProtoLens.Field "usePerSessionThreads" msg)
    +                         (Data.ProtoLens.Field "usePerSessionThreads" msg')
    +usePerSessionThreads
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "usePerSessionThreads")
    +
    +value ::
    +      forall msg msg' . Data.ProtoLens.HasField "value" msg msg' =>
    +        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "value" msg)
    +          (Data.ProtoLens.Field "value" msg')
    +value
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "value")
    +
    +visibleDeviceList ::
    +                  forall msg msg' .
    +                    Data.ProtoLens.HasField "visibleDeviceList" msg msg' =>
    +                    Lens.Family2.Lens msg msg'
    +                      (Data.ProtoLens.Field "visibleDeviceList" msg)
    +                      (Data.ProtoLens.Field "visibleDeviceList" msg')
    +visibleDeviceList
    +  = Data.ProtoLens.field
    +      (Data.ProtoLens.ProxySym ::
    +         Data.ProtoLens.ProxySym "visibleDeviceList")
    +
+
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-proto-0.1.0.0/src/hscolour.css
new file mode 100644
index 0000000..c15919e
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/src/hscolour.css
@@ -0,0 +1,5 @@
+.hs-keyglyph, .hs-layout {color: red;}
+.hs-keyword {color: blue;}
+.hs-comment, .hs-comment a {color: green;}
+.hs-str, .hs-chr {color: teal;}
+.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {}
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-proto-0.1.0.0/synopsis.png
new file mode 100644
index 0000000000000000000000000000000000000000..85fb86ec84907bcc86531dc82871948ff4d471fa
GIT binary patch
literal 11327
[base85-encoded binary payload for synopsis.png (11327 bytes) omitted]

literal 0
HcmV?d00001

diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt b/docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt
new file mode 100644
index 0000000..095759b
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt
@@ -0,0 +1,663 @@
+-- Hoogle documentation, generated by Haddock
+-- See Hoogle, http://www.haskell.org/hoogle/
+
+
+-- | TensorFlow protocol buffers.
+--
+-- Please see README.md
+@package tensorflow-proto
+@version 0.1.0.0
+
+module Proto.Tensorflow.Core.Framework.ResourceHandle
+data ResourceHandle
+ResourceHandle :: Text -> Text -> Text -> Word64 -> Text -> ResourceHandle
+[_ResourceHandle'device] :: ResourceHandle -> Text
+[_ResourceHandle'container] :: ResourceHandle -> Text
+[_ResourceHandle'name] :: ResourceHandle -> Text
+[_ResourceHandle'hashCode] :: ResourceHandle -> Word64
+[_ResourceHandle'maybeTypeName] :: ResourceHandle -> Text
+container :: HasField "container" msg msg' => Lens msg msg' (Field "container" msg) (Field "container" msg')
+device :: HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg')
+hashCode :: HasField "hashCode" msg msg' => Lens msg msg' (Field "hashCode" msg) (Field "hashCode" msg')
+maybeTypeName :: HasField "maybeTypeName" msg msg' => Lens msg msg' (Field "maybeTypeName" msg) (Field "maybeTypeName" msg')
+name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle
+instance Data.ProtoLens.Field.HasField "device" Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle
+instance Data.ProtoLens.Field.HasField "container" Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle
+instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle
+instance Data.ProtoLens.Field.HasField "hashCode" Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle
+instance Data.ProtoLens.Field.HasField "maybeTypeName" Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle
+
+module Proto.Tensorflow.Core.Framework.Types
+data DataType
+DT_INVALID :: DataType
+DT_FLOAT :: DataType
+DT_DOUBLE :: DataType
+DT_INT32 :: DataType
+DT_UINT8 :: DataType
+DT_INT16 :: DataType
+DT_INT8 :: DataType
+DT_STRING :: DataType
+DT_COMPLEX64 :: DataType
+DT_INT64 :: DataType
+DT_BOOL :: DataType
+DT_QINT8 :: DataType
+DT_QUINT8 :: DataType
+DT_QINT32 :: DataType
+DT_BFLOAT16 :: DataType
+DT_QINT16 :: DataType
+DT_QUINT16 :: DataType
+DT_UINT16 :: DataType
+DT_COMPLEX128 :: DataType
+DT_HALF :: DataType
+DT_RESOURCE :: DataType
+DT_FLOAT_REF :: DataType
+DT_DOUBLE_REF :: DataType
+DT_INT32_REF :: DataType
+DT_UINT8_REF :: DataType
+DT_INT16_REF :: DataType
+DT_INT8_REF :: DataType
+DT_STRING_REF :: DataType
+DT_COMPLEX64_REF :: DataType
+DT_INT64_REF :: DataType
+DT_BOOL_REF :: DataType
+DT_QINT8_REF :: DataType
+DT_QUINT8_REF :: DataType
+DT_QINT32_REF :: DataType
+DT_BFLOAT16_REF :: DataType
+DT_QINT16_REF :: DataType
+DT_QUINT16_REF :: DataType
+DT_UINT16_REF :: DataType
+DT_COMPLEX128_REF :: DataType
+DT_HALF_REF :: DataType
+DT_RESOURCE_REF :: DataType
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Types.DataType
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Types.DataType
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Types.DataType
+instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Framework.Types.DataType
+instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Framework.Types.DataType
+instance GHC.Enum.Enum Proto.Tensorflow.Core.Framework.Types.DataType
+instance GHC.Enum.Bounded Proto.Tensorflow.Core.Framework.Types.DataType
+
+module Proto.Tensorflow.Core.Framework.TensorShape
+data TensorShapeProto
+TensorShapeProto :: [TensorShapeProto'Dim] -> Bool -> TensorShapeProto
+[_TensorShapeProto'dim] :: TensorShapeProto -> [TensorShapeProto'Dim]
+[_TensorShapeProto'unknownRank] :: TensorShapeProto -> Bool
+data TensorShapeProto'Dim
+TensorShapeProto'Dim :: Int64 -> Text -> TensorShapeProto'Dim
+[_TensorShapeProto'Dim'size] :: TensorShapeProto'Dim -> Int64
+[_TensorShapeProto'Dim'name] :: TensorShapeProto'Dim -> Text
+dim :: HasField "dim" msg msg' => Lens msg msg' (Field "dim" msg) (Field "dim" msg')
+name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')
+size :: HasField "size" msg msg' => Lens msg msg' (Field "size" msg) (Field "size" msg')
+unknownRank :: HasField "unknownRank" msg msg' => Lens msg msg' (Field "unknownRank" msg) (Field "unknownRank" msg')
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim
+instance Data.ProtoLens.Field.HasField "dim" Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
+instance Data.ProtoLens.Field.HasField "unknownRank" Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
+instance Data.ProtoLens.Field.HasField "size" Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim
+instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto'Dim
+
+module Proto.Tensorflow.Core.Framework.Tensor
+data TensorProto
+TensorProto :: DataType -> Maybe TensorShapeProto -> Int32 -> ByteString -> [Int32] -> [Float] -> [Double] -> [Int32] -> [ByteString] -> [Float] -> [Int64] -> [Bool] -> [Double] -> [ResourceHandle] -> TensorProto
+[_TensorProto'dtype] :: TensorProto -> DataType
+[_TensorProto'tensorShape] :: TensorProto -> Maybe TensorShapeProto
+[_TensorProto'versionNumber] :: TensorProto -> Int32
+[_TensorProto'tensorContent] :: TensorProto -> ByteString
+[_TensorProto'halfVal] :: TensorProto -> [Int32]
+[_TensorProto'floatVal] :: TensorProto -> [Float]
+[_TensorProto'doubleVal] :: TensorProto -> [Double]
+[_TensorProto'intVal] :: TensorProto -> [Int32]
+[_TensorProto'stringVal] :: TensorProto -> [ByteString]
+[_TensorProto'scomplexVal] :: TensorProto -> [Float]
+[_TensorProto'int64Val] :: TensorProto -> [Int64]
+[_TensorProto'boolVal] :: TensorProto -> [Bool]
+[_TensorProto'dcomplexVal] :: TensorProto -> [Double]
+[_TensorProto'resourceHandleVal] :: TensorProto -> [ResourceHandle]
+boolVal :: HasField "boolVal" msg msg' => Lens msg msg' (Field "boolVal" msg) (Field "boolVal" msg')
+dcomplexVal :: HasField "dcomplexVal" msg msg' => Lens msg msg' (Field "dcomplexVal" msg) (Field "dcomplexVal" msg')
+doubleVal :: HasField "doubleVal" msg msg' => Lens msg msg' (Field "doubleVal" msg) (Field "doubleVal" msg')
+dtype :: HasField "dtype" msg msg' => Lens msg msg' (Field "dtype" msg) (Field "dtype" msg')
+floatVal :: HasField "floatVal" msg msg' => Lens msg msg' (Field "floatVal" msg) (Field "floatVal" msg')
+halfVal :: HasField "halfVal" msg msg' => Lens msg msg' (Field "halfVal" msg) (Field "halfVal" msg')
+int64Val :: HasField "int64Val" msg msg' => Lens msg msg' (Field "int64Val" msg) (Field "int64Val" msg')
+intVal :: HasField "intVal" msg msg' => Lens msg msg' (Field "intVal" msg) (Field "intVal" msg')
+maybe'tensorShape :: HasField "maybe'tensorShape" msg msg' => Lens msg msg' (Field "maybe'tensorShape" msg) (Field "maybe'tensorShape" msg')
+resourceHandleVal :: HasField "resourceHandleVal" msg msg' => Lens msg msg' (Field "resourceHandleVal" msg) (Field "resourceHandleVal" msg')
+scomplexVal :: HasField "scomplexVal" msg msg' => Lens msg msg' (Field "scomplexVal" msg) (Field "scomplexVal" msg')
+stringVal :: HasField "stringVal" msg msg' => Lens msg msg' (Field "stringVal" msg) (Field "stringVal" msg')
+tensorContent :: HasField "tensorContent" msg msg' => Lens msg msg' (Field "tensorContent" msg) (Field "tensorContent" msg')
+tensorShape :: HasField "tensorShape" msg msg' => Lens msg msg' (Field "tensorShape" msg) (Field "tensorShape" msg')
+versionNumber :: HasField "versionNumber" msg msg' => Lens msg msg' (Field "versionNumber" msg) (Field "versionNumber" msg')
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance Data.ProtoLens.Field.HasField "dtype" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance Data.ProtoLens.Field.HasField "tensorShape" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance Data.ProtoLens.Field.HasField "maybe'tensorShape" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance Data.ProtoLens.Field.HasField "versionNumber" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance Data.ProtoLens.Field.HasField "tensorContent" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance Data.ProtoLens.Field.HasField "halfVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance Data.ProtoLens.Field.HasField "floatVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance Data.ProtoLens.Field.HasField "doubleVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance Data.ProtoLens.Field.HasField "intVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+instance Data.ProtoLens.Field.HasField "stringVal"
Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto +instance Data.ProtoLens.Field.HasField "scomplexVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto +instance Data.ProtoLens.Field.HasField "int64Val" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto +instance Data.ProtoLens.Field.HasField "boolVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto +instance Data.ProtoLens.Field.HasField "dcomplexVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto +instance Data.ProtoLens.Field.HasField "resourceHandleVal" Proto.Tensorflow.Core.Framework.Tensor.TensorProto Proto.Tensorflow.Core.Framework.Tensor.TensorProto +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Tensor.TensorProto +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Tensor.TensorProto + +module Proto.Tensorflow.Core.Framework.AttrValue +data AttrValue +AttrValue :: Maybe ByteString -> Maybe Int64 -> Maybe Float -> Maybe Bool -> Maybe DataType -> Maybe TensorShapeProto -> Maybe TensorProto -> Maybe AttrValue'ListValue -> Maybe NameAttrList -> Maybe Text -> AttrValue +[_AttrValue's] :: AttrValue -> Maybe ByteString +[_AttrValue'i] :: AttrValue -> Maybe Int64 +[_AttrValue'f] :: AttrValue -> Maybe Float +[_AttrValue'b] :: AttrValue -> Maybe Bool +[_AttrValue'type'] :: AttrValue -> Maybe DataType +[_AttrValue'shape] :: AttrValue -> Maybe TensorShapeProto +[_AttrValue'tensor] :: AttrValue -> Maybe TensorProto +[_AttrValue'list] :: AttrValue -> Maybe AttrValue'ListValue +[_AttrValue'func] :: AttrValue -> Maybe NameAttrList +[_AttrValue'placeholder] :: AttrValue -> Maybe Text +data AttrValue'ListValue +AttrValue'ListValue :: [ByteString] -> [Int64] -> [Float] -> [Bool] -> [DataType] -> [TensorShapeProto] -> [TensorProto] -> AttrValue'ListValue +[_AttrValue'ListValue's] :: AttrValue'ListValue -> [ByteString] +[_AttrValue'ListValue'i] :: AttrValue'ListValue -> [Int64] +[_AttrValue'ListValue'f] :: AttrValue'ListValue -> [Float] +[_AttrValue'ListValue'b] :: AttrValue'ListValue -> [Bool] +[_AttrValue'ListValue'type'] :: AttrValue'ListValue -> [DataType] +[_AttrValue'ListValue'shape] :: AttrValue'ListValue -> [TensorShapeProto] +[_AttrValue'ListValue'tensor] :: AttrValue'ListValue -> [TensorProto] +data NameAttrList +NameAttrList :: Text -> Map Text AttrValue -> NameAttrList +[_NameAttrList'name] :: NameAttrList -> Text +[_NameAttrList'attr] :: NameAttrList -> Map Text AttrValue +data NameAttrList'AttrEntry +NameAttrList'AttrEntry :: Text -> Maybe AttrValue -> NameAttrList'AttrEntry +[_NameAttrList'AttrEntry'key] :: NameAttrList'AttrEntry -> Text +[_NameAttrList'AttrEntry'value] :: NameAttrList'AttrEntry -> Maybe AttrValue +attr :: HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') +b :: HasField "b" msg msg' => Lens msg msg' (Field "b" msg) (Field "b" msg') +f :: HasField "f" msg msg' => Lens msg msg' (Field "f" msg) (Field "f" msg') +func :: HasField "func" msg msg' => Lens msg msg' (Field "func" msg) (Field "func" msg') +i :: HasField "i" msg msg' => Lens msg msg' (Field "i" msg) (Field "i" msg') +key :: HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') +list :: HasField "list" msg msg' => Lens msg msg' (Field "list" msg) (Field "list" msg') +maybe'b :: HasField "maybe'b" msg msg' => 
Lens msg msg' (Field "maybe'b" msg) (Field "maybe'b" msg') +maybe'f :: HasField "maybe'f" msg msg' => Lens msg msg' (Field "maybe'f" msg) (Field "maybe'f" msg') +maybe'func :: HasField "maybe'func" msg msg' => Lens msg msg' (Field "maybe'func" msg) (Field "maybe'func" msg') +maybe'i :: HasField "maybe'i" msg msg' => Lens msg msg' (Field "maybe'i" msg) (Field "maybe'i" msg') +maybe'list :: HasField "maybe'list" msg msg' => Lens msg msg' (Field "maybe'list" msg) (Field "maybe'list" msg') +maybe'placeholder :: HasField "maybe'placeholder" msg msg' => Lens msg msg' (Field "maybe'placeholder" msg) (Field "maybe'placeholder" msg') +maybe's :: HasField "maybe's" msg msg' => Lens msg msg' (Field "maybe's" msg) (Field "maybe's" msg') +maybe'shape :: HasField "maybe'shape" msg msg' => Lens msg msg' (Field "maybe'shape" msg) (Field "maybe'shape" msg') +maybe'tensor :: HasField "maybe'tensor" msg msg' => Lens msg msg' (Field "maybe'tensor" msg) (Field "maybe'tensor" msg') +maybe'type' :: HasField "maybe'type'" msg msg' => Lens msg msg' (Field "maybe'type'" msg) (Field "maybe'type'" msg') +maybe'value :: HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg') +name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') +placeholder :: HasField "placeholder" msg msg' => Lens msg msg' (Field "placeholder" msg) (Field "placeholder" msg') +s :: HasField "s" msg msg' => Lens msg msg' (Field "s" msg) (Field "s" msg') +shape :: HasField "shape" msg msg' => Lens msg msg' (Field "shape" msg) (Field "shape" msg') +tensor :: HasField "tensor" msg msg' => Lens msg msg' (Field "tensor" msg) (Field "tensor" msg') +type' :: HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg') +value :: HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance Data.ProtoLens.Field.HasField "s" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "maybe's" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "i" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "maybe'i" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "f" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "maybe'f" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "b" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue 
Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "maybe'b" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "type'" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "maybe'type'" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "shape" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "maybe'shape" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "tensor" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "maybe'tensor" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "list" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "maybe'list" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "func" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "maybe'func" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "placeholder" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "maybe'placeholder" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.AttrValue.AttrValue +instance Data.ProtoLens.Field.HasField "s" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance Data.ProtoLens.Field.HasField "i" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance Data.ProtoLens.Field.HasField "f" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance Data.ProtoLens.Field.HasField "b" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance Data.ProtoLens.Field.HasField "type'" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance Data.ProtoLens.Field.HasField "shape" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance Data.ProtoLens.Field.HasField "tensor" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue 
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList +instance Data.ProtoLens.Field.HasField "attr" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList +instance Data.ProtoLens.Field.HasField "key" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry +instance Data.ProtoLens.Field.HasField "value" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry +instance Data.ProtoLens.Field.HasField "maybe'value" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry + +module Proto.Tensorflow.Core.Framework.NodeDef +data NodeDef +NodeDef :: Text -> Text -> [Text] -> Text -> Map Text AttrValue -> NodeDef +[_NodeDef'name] :: NodeDef -> Text +[_NodeDef'op] :: NodeDef -> Text +[_NodeDef'input] :: NodeDef -> [Text] +[_NodeDef'device] :: NodeDef -> Text +[_NodeDef'attr] :: NodeDef -> Map Text AttrValue +data NodeDef'AttrEntry +NodeDef'AttrEntry :: Text -> Maybe AttrValue -> NodeDef'AttrEntry +[_NodeDef'AttrEntry'key] :: NodeDef'AttrEntry -> Text +[_NodeDef'AttrEntry'value] :: NodeDef'AttrEntry -> Maybe AttrValue +attr :: HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') +device :: HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg') +input :: HasField "input" msg msg' => Lens msg msg' (Field "input" msg) (Field "input" msg') +key :: HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') +maybe'value :: HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg') +name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') +op :: HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg') +value :: HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.NodeDef.NodeDef +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.NodeDef.NodeDef +instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef Proto.Tensorflow.Core.Framework.NodeDef.NodeDef +instance Data.ProtoLens.Field.HasField "op" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef Proto.Tensorflow.Core.Framework.NodeDef.NodeDef +instance Data.ProtoLens.Field.HasField "input" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef Proto.Tensorflow.Core.Framework.NodeDef.NodeDef +instance Data.ProtoLens.Field.HasField "device" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef Proto.Tensorflow.Core.Framework.NodeDef.NodeDef +instance 
Data.ProtoLens.Field.HasField "attr" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef Proto.Tensorflow.Core.Framework.NodeDef.NodeDef +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.NodeDef.NodeDef +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.NodeDef.NodeDef +instance Data.ProtoLens.Field.HasField "key" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry +instance Data.ProtoLens.Field.HasField "value" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry +instance Data.ProtoLens.Field.HasField "maybe'value" Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.NodeDef.NodeDef'AttrEntry + +module Proto.Tensorflow.Core.Framework.OpDef +data OpDef +OpDef :: Text -> [OpDef'ArgDef] -> [OpDef'ArgDef] -> [OpDef'AttrDef] -> Maybe OpDeprecation -> Text -> Text -> Bool -> Bool -> Bool -> Bool -> OpDef +[_OpDef'name] :: OpDef -> Text +[_OpDef'inputArg] :: OpDef -> [OpDef'ArgDef] +[_OpDef'outputArg] :: OpDef -> [OpDef'ArgDef] +[_OpDef'attr] :: OpDef -> [OpDef'AttrDef] +[_OpDef'deprecation] :: OpDef -> Maybe OpDeprecation +[_OpDef'summary] :: OpDef -> Text +[_OpDef'description] :: OpDef -> Text +[_OpDef'isCommutative] :: OpDef -> Bool +[_OpDef'isAggregate] :: OpDef -> Bool +[_OpDef'isStateful] :: OpDef -> Bool +[_OpDef'allowsUninitializedInput] :: OpDef -> Bool +data OpDef'ArgDef +OpDef'ArgDef :: Text -> Text -> DataType -> Text -> Text -> Text -> Bool -> OpDef'ArgDef +[_OpDef'ArgDef'name] :: OpDef'ArgDef -> Text +[_OpDef'ArgDef'description] :: OpDef'ArgDef -> Text +[_OpDef'ArgDef'type'] :: OpDef'ArgDef -> DataType +[_OpDef'ArgDef'typeAttr] :: OpDef'ArgDef -> Text +[_OpDef'ArgDef'numberAttr] :: OpDef'ArgDef -> Text +[_OpDef'ArgDef'typeListAttr] :: OpDef'ArgDef -> Text +[_OpDef'ArgDef'isRef] :: OpDef'ArgDef -> Bool +data OpDef'AttrDef +OpDef'AttrDef :: Text -> Text -> Maybe AttrValue -> Text -> Bool -> Int64 -> Maybe AttrValue -> OpDef'AttrDef +[_OpDef'AttrDef'name] :: OpDef'AttrDef -> Text +[_OpDef'AttrDef'type'] :: OpDef'AttrDef -> Text +[_OpDef'AttrDef'defaultValue] :: OpDef'AttrDef -> Maybe AttrValue +[_OpDef'AttrDef'description] :: OpDef'AttrDef -> Text +[_OpDef'AttrDef'hasMinimum] :: OpDef'AttrDef -> Bool +[_OpDef'AttrDef'minimum] :: OpDef'AttrDef -> Int64 +[_OpDef'AttrDef'allowedValues] :: OpDef'AttrDef -> Maybe AttrValue +data OpDeprecation +OpDeprecation :: Int32 -> Text -> OpDeprecation +[_OpDeprecation'version] :: OpDeprecation -> Int32 +[_OpDeprecation'explanation] :: OpDeprecation -> Text +data OpList +OpList :: [OpDef] -> OpList +[_OpList'op] :: OpList -> [OpDef] +allowedValues :: HasField "allowedValues" msg msg' => Lens msg msg' (Field "allowedValues" msg) (Field "allowedValues" msg') +allowsUninitializedInput :: HasField "allowsUninitializedInput" msg msg' => Lens msg msg' (Field "allowsUninitializedInput" msg) (Field "allowsUninitializedInput" msg') +attr :: HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') +defaultValue :: HasField "defaultValue" msg msg' => Lens msg msg' (Field "defaultValue" msg) (Field "defaultValue" msg') +deprecation :: HasField "deprecation" msg msg' => Lens msg msg' (Field "deprecation" msg) (Field "deprecation" msg') +description :: 
HasField "description" msg msg' => Lens msg msg' (Field "description" msg) (Field "description" msg') +explanation :: HasField "explanation" msg msg' => Lens msg msg' (Field "explanation" msg) (Field "explanation" msg') +hasMinimum :: HasField "hasMinimum" msg msg' => Lens msg msg' (Field "hasMinimum" msg) (Field "hasMinimum" msg') +inputArg :: HasField "inputArg" msg msg' => Lens msg msg' (Field "inputArg" msg) (Field "inputArg" msg') +isAggregate :: HasField "isAggregate" msg msg' => Lens msg msg' (Field "isAggregate" msg) (Field "isAggregate" msg') +isCommutative :: HasField "isCommutative" msg msg' => Lens msg msg' (Field "isCommutative" msg) (Field "isCommutative" msg') +isRef :: HasField "isRef" msg msg' => Lens msg msg' (Field "isRef" msg) (Field "isRef" msg') +isStateful :: HasField "isStateful" msg msg' => Lens msg msg' (Field "isStateful" msg) (Field "isStateful" msg') +maybe'allowedValues :: HasField "maybe'allowedValues" msg msg' => Lens msg msg' (Field "maybe'allowedValues" msg) (Field "maybe'allowedValues" msg') +maybe'defaultValue :: HasField "maybe'defaultValue" msg msg' => Lens msg msg' (Field "maybe'defaultValue" msg) (Field "maybe'defaultValue" msg') +maybe'deprecation :: HasField "maybe'deprecation" msg msg' => Lens msg msg' (Field "maybe'deprecation" msg) (Field "maybe'deprecation" msg') +minimum :: HasField "minimum" msg msg' => Lens msg msg' (Field "minimum" msg) (Field "minimum" msg') +name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') +numberAttr :: HasField "numberAttr" msg msg' => Lens msg msg' (Field "numberAttr" msg) (Field "numberAttr" msg') +op :: HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg') +outputArg :: HasField "outputArg" msg msg' => Lens msg msg' (Field "outputArg" msg) (Field "outputArg" msg') +summary :: HasField "summary" msg msg' => Lens msg msg' (Field "summary" msg) (Field "summary" msg') +type' :: HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg') +typeAttr :: HasField "typeAttr" msg msg' => Lens msg msg' (Field "typeAttr" msg) (Field "typeAttr" msg') +typeListAttr :: HasField "typeListAttr" msg msg' => Lens msg msg' (Field "typeListAttr" msg) (Field "typeListAttr" msg') +version :: HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg') +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.OpDef.OpList +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.OpDef.OpList +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "inputArg" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "outputArg" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "attr" 
Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "deprecation" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "maybe'deprecation" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "summary" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "description" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "isCommutative" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "isAggregate" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "isStateful" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "allowsUninitializedInput" Proto.Tensorflow.Core.Framework.OpDef.OpDef Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.OpDef +instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance Data.ProtoLens.Field.HasField "description" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance Data.ProtoLens.Field.HasField "type'" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance Data.ProtoLens.Field.HasField "typeAttr" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance Data.ProtoLens.Field.HasField "numberAttr" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance Data.ProtoLens.Field.HasField "typeListAttr" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance Data.ProtoLens.Field.HasField "isRef" Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.OpDef'ArgDef +instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.ProtoLens.Field.HasField "type'" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.ProtoLens.Field.HasField "defaultValue" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.ProtoLens.Field.HasField "maybe'defaultValue" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.ProtoLens.Field.HasField "description" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.ProtoLens.Field.HasField "hasMinimum" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef 
Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.ProtoLens.Field.HasField "minimum" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.ProtoLens.Field.HasField "allowedValues" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.ProtoLens.Field.HasField "maybe'allowedValues" Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.OpDef'AttrDef +instance Data.ProtoLens.Field.HasField "version" Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation +instance Data.ProtoLens.Field.HasField "explanation" Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.OpDeprecation +instance Data.ProtoLens.Field.HasField "op" Proto.Tensorflow.Core.Framework.OpDef.OpList Proto.Tensorflow.Core.Framework.OpDef.OpList +instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.OpDef.OpList +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.OpList + +module Proto.Tensorflow.Core.Framework.Graph +data GraphDef +GraphDef :: [NodeDef] -> Maybe VersionDef -> Int32 -> Maybe FunctionDefLibrary -> GraphDef +[_GraphDef'node] :: GraphDef -> [NodeDef] +[_GraphDef'versions] :: GraphDef -> Maybe VersionDef +[_GraphDef'version] :: GraphDef -> Int32 +[_GraphDef'library] :: GraphDef -> Maybe FunctionDefLibrary +library :: HasField "library" msg msg' => Lens msg msg' (Field "library" msg) (Field "library" msg') +maybe'library :: HasField "maybe'library" msg msg' => Lens msg msg' (Field "maybe'library" msg) (Field "maybe'library" msg') +maybe'versions :: HasField "maybe'versions" msg msg' => Lens msg msg' (Field "maybe'versions" msg) (Field "maybe'versions" msg') +node :: HasField "node" msg msg' => Lens msg msg' (Field "node" msg) (Field "node" msg') +version :: HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg') +versions :: HasField "versions" msg msg' => Lens msg msg' (Field "versions" msg) (Field "versions" msg') +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Graph.GraphDef +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Graph.GraphDef +instance Data.ProtoLens.Field.HasField "node" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef +instance Data.ProtoLens.Field.HasField "versions" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef +instance Data.ProtoLens.Field.HasField "maybe'versions" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef +instance Data.ProtoLens.Field.HasField "version" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef +instance Data.ProtoLens.Field.HasField "library" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef +instance Data.ProtoLens.Field.HasField "maybe'library" Proto.Tensorflow.Core.Framework.Graph.GraphDef Proto.Tensorflow.Core.Framework.Graph.GraphDef +instance 
Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Graph.GraphDef +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Graph.GraphDef + +module Proto.Tensorflow.Core.Protobuf.Config +data ConfigProto +ConfigProto :: Map Text Int32 -> Int32 -> Int32 -> Bool -> [ThreadPoolOptionProto] -> Int32 -> [Text] -> Maybe GPUOptions -> Bool -> Bool -> Maybe GraphOptions -> Int64 -> ConfigProto +[_ConfigProto'deviceCount] :: ConfigProto -> Map Text Int32 +[_ConfigProto'intraOpParallelismThreads] :: ConfigProto -> Int32 +[_ConfigProto'interOpParallelismThreads] :: ConfigProto -> Int32 +[_ConfigProto'usePerSessionThreads] :: ConfigProto -> Bool +[_ConfigProto'sessionInterOpThreadPool] :: ConfigProto -> [ThreadPoolOptionProto] +[_ConfigProto'placementPeriod] :: ConfigProto -> Int32 +[_ConfigProto'deviceFilters] :: ConfigProto -> [Text] +[_ConfigProto'gpuOptions] :: ConfigProto -> Maybe GPUOptions +[_ConfigProto'allowSoftPlacement] :: ConfigProto -> Bool +[_ConfigProto'logDevicePlacement] :: ConfigProto -> Bool +[_ConfigProto'graphOptions] :: ConfigProto -> Maybe GraphOptions +[_ConfigProto'operationTimeoutInMs] :: ConfigProto -> Int64 +data ConfigProto'DeviceCountEntry +ConfigProto'DeviceCountEntry :: Text -> Int32 -> ConfigProto'DeviceCountEntry +[_ConfigProto'DeviceCountEntry'key] :: ConfigProto'DeviceCountEntry -> Text +[_ConfigProto'DeviceCountEntry'value] :: ConfigProto'DeviceCountEntry -> Int32 +data DebugTensorWatch +DebugTensorWatch :: Text -> Int32 -> [Text] -> [Text] -> DebugTensorWatch +[_DebugTensorWatch'nodeName] :: DebugTensorWatch -> Text +[_DebugTensorWatch'outputSlot] :: DebugTensorWatch -> Int32 +[_DebugTensorWatch'debugOps] :: DebugTensorWatch -> [Text] +[_DebugTensorWatch'debugUrls] :: DebugTensorWatch -> [Text] +data GPUOptions +GPUOptions :: Double -> Text -> Int64 -> Bool -> Text -> GPUOptions +[_GPUOptions'perProcessGpuMemoryFraction] :: GPUOptions -> Double +[_GPUOptions'allocatorType] :: GPUOptions -> Text +[_GPUOptions'deferredDeletionBytes] :: GPUOptions -> Int64 +[_GPUOptions'allowGrowth] :: GPUOptions -> Bool +[_GPUOptions'visibleDeviceList] :: GPUOptions -> Text +data GraphOptions +GraphOptions :: Bool -> Maybe OptimizerOptions -> Int64 -> Int64 -> Bool -> Bool -> Bool -> Int32 -> GraphOptions +[_GraphOptions'enableRecvScheduling] :: GraphOptions -> Bool +[_GraphOptions'optimizerOptions] :: GraphOptions -> Maybe OptimizerOptions +[_GraphOptions'buildCostModel] :: GraphOptions -> Int64 +[_GraphOptions'buildCostModelAfter] :: GraphOptions -> Int64 +[_GraphOptions'inferShapes] :: GraphOptions -> Bool +[_GraphOptions'placePrunedGraph] :: GraphOptions -> Bool +[_GraphOptions'enableBfloat16Sendrecv] :: GraphOptions -> Bool +[_GraphOptions'timelineStep] :: GraphOptions -> Int32 +data OptimizerOptions +OptimizerOptions :: Bool -> Bool -> Bool -> OptimizerOptions'Level -> OptimizerOptions +[_OptimizerOptions'doCommonSubexpressionElimination] :: OptimizerOptions -> Bool +[_OptimizerOptions'doConstantFolding] :: OptimizerOptions -> Bool +[_OptimizerOptions'doFunctionInlining] :: OptimizerOptions -> Bool +[_OptimizerOptions'optLevel] :: OptimizerOptions -> OptimizerOptions'Level +data OptimizerOptions'Level +OptimizerOptions'L0 :: OptimizerOptions'Level +OptimizerOptions'L1 :: OptimizerOptions'Level +data RunMetadata +RunMetadata :: Maybe StepStats -> Maybe CostGraphDef -> [GraphDef] -> RunMetadata +[_RunMetadata'stepStats] :: RunMetadata -> Maybe StepStats +[_RunMetadata'costGraph] :: RunMetadata -> Maybe CostGraphDef +[_RunMetadata'partitionGraphs] :: 
RunMetadata -> [GraphDef] +data RunOptions +RunOptions :: RunOptions'TraceLevel -> Int64 -> Int32 -> [DebugTensorWatch] -> Bool -> RunOptions +[_RunOptions'traceLevel] :: RunOptions -> RunOptions'TraceLevel +[_RunOptions'timeoutInMs] :: RunOptions -> Int64 +[_RunOptions'interOpThreadPool] :: RunOptions -> Int32 +[_RunOptions'debugTensorWatchOpts] :: RunOptions -> [DebugTensorWatch] +[_RunOptions'outputPartitionGraphs] :: RunOptions -> Bool +data RunOptions'TraceLevel +RunOptions'NO_TRACE :: RunOptions'TraceLevel +RunOptions'SOFTWARE_TRACE :: RunOptions'TraceLevel +RunOptions'HARDWARE_TRACE :: RunOptions'TraceLevel +RunOptions'FULL_TRACE :: RunOptions'TraceLevel +data ThreadPoolOptionProto +ThreadPoolOptionProto :: Int32 -> ThreadPoolOptionProto +[_ThreadPoolOptionProto'numThreads] :: ThreadPoolOptionProto -> Int32 +allocatorType :: HasField "allocatorType" msg msg' => Lens msg msg' (Field "allocatorType" msg) (Field "allocatorType" msg') +allowGrowth :: HasField "allowGrowth" msg msg' => Lens msg msg' (Field "allowGrowth" msg) (Field "allowGrowth" msg') +allowSoftPlacement :: HasField "allowSoftPlacement" msg msg' => Lens msg msg' (Field "allowSoftPlacement" msg) (Field "allowSoftPlacement" msg') +buildCostModel :: HasField "buildCostModel" msg msg' => Lens msg msg' (Field "buildCostModel" msg) (Field "buildCostModel" msg') +buildCostModelAfter :: HasField "buildCostModelAfter" msg msg' => Lens msg msg' (Field "buildCostModelAfter" msg) (Field "buildCostModelAfter" msg') +costGraph :: HasField "costGraph" msg msg' => Lens msg msg' (Field "costGraph" msg) (Field "costGraph" msg') +debugOps :: HasField "debugOps" msg msg' => Lens msg msg' (Field "debugOps" msg) (Field "debugOps" msg') +debugTensorWatchOpts :: HasField "debugTensorWatchOpts" msg msg' => Lens msg msg' (Field "debugTensorWatchOpts" msg) (Field "debugTensorWatchOpts" msg') +debugUrls :: HasField "debugUrls" msg msg' => Lens msg msg' (Field "debugUrls" msg) (Field "debugUrls" msg') +deferredDeletionBytes :: HasField "deferredDeletionBytes" msg msg' => Lens msg msg' (Field "deferredDeletionBytes" msg) (Field "deferredDeletionBytes" msg') +deviceCount :: HasField "deviceCount" msg msg' => Lens msg msg' (Field "deviceCount" msg) (Field "deviceCount" msg') +deviceFilters :: HasField "deviceFilters" msg msg' => Lens msg msg' (Field "deviceFilters" msg) (Field "deviceFilters" msg') +doCommonSubexpressionElimination :: HasField "doCommonSubexpressionElimination" msg msg' => Lens msg msg' (Field "doCommonSubexpressionElimination" msg) (Field "doCommonSubexpressionElimination" msg') +doConstantFolding :: HasField "doConstantFolding" msg msg' => Lens msg msg' (Field "doConstantFolding" msg) (Field "doConstantFolding" msg') +doFunctionInlining :: HasField "doFunctionInlining" msg msg' => Lens msg msg' (Field "doFunctionInlining" msg) (Field "doFunctionInlining" msg') +enableBfloat16Sendrecv :: HasField "enableBfloat16Sendrecv" msg msg' => Lens msg msg' (Field "enableBfloat16Sendrecv" msg) (Field "enableBfloat16Sendrecv" msg') +enableRecvScheduling :: HasField "enableRecvScheduling" msg msg' => Lens msg msg' (Field "enableRecvScheduling" msg) (Field "enableRecvScheduling" msg') +gpuOptions :: HasField "gpuOptions" msg msg' => Lens msg msg' (Field "gpuOptions" msg) (Field "gpuOptions" msg') +graphOptions :: HasField "graphOptions" msg msg' => Lens msg msg' (Field "graphOptions" msg) (Field "graphOptions" msg') +inferShapes :: HasField "inferShapes" msg msg' => Lens msg msg' (Field "inferShapes" msg) (Field "inferShapes" msg') 
+interOpParallelismThreads :: HasField "interOpParallelismThreads" msg msg' => Lens msg msg' (Field "interOpParallelismThreads" msg) (Field "interOpParallelismThreads" msg') +interOpThreadPool :: HasField "interOpThreadPool" msg msg' => Lens msg msg' (Field "interOpThreadPool" msg) (Field "interOpThreadPool" msg') +intraOpParallelismThreads :: HasField "intraOpParallelismThreads" msg msg' => Lens msg msg' (Field "intraOpParallelismThreads" msg) (Field "intraOpParallelismThreads" msg') +key :: HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') +logDevicePlacement :: HasField "logDevicePlacement" msg msg' => Lens msg msg' (Field "logDevicePlacement" msg) (Field "logDevicePlacement" msg') +maybe'costGraph :: HasField "maybe'costGraph" msg msg' => Lens msg msg' (Field "maybe'costGraph" msg) (Field "maybe'costGraph" msg') +maybe'gpuOptions :: HasField "maybe'gpuOptions" msg msg' => Lens msg msg' (Field "maybe'gpuOptions" msg) (Field "maybe'gpuOptions" msg') +maybe'graphOptions :: HasField "maybe'graphOptions" msg msg' => Lens msg msg' (Field "maybe'graphOptions" msg) (Field "maybe'graphOptions" msg') +maybe'optimizerOptions :: HasField "maybe'optimizerOptions" msg msg' => Lens msg msg' (Field "maybe'optimizerOptions" msg) (Field "maybe'optimizerOptions" msg') +maybe'stepStats :: HasField "maybe'stepStats" msg msg' => Lens msg msg' (Field "maybe'stepStats" msg) (Field "maybe'stepStats" msg') +nodeName :: HasField "nodeName" msg msg' => Lens msg msg' (Field "nodeName" msg) (Field "nodeName" msg') +numThreads :: HasField "numThreads" msg msg' => Lens msg msg' (Field "numThreads" msg) (Field "numThreads" msg') +operationTimeoutInMs :: HasField "operationTimeoutInMs" msg msg' => Lens msg msg' (Field "operationTimeoutInMs" msg) (Field "operationTimeoutInMs" msg') +optLevel :: HasField "optLevel" msg msg' => Lens msg msg' (Field "optLevel" msg) (Field "optLevel" msg') +optimizerOptions :: HasField "optimizerOptions" msg msg' => Lens msg msg' (Field "optimizerOptions" msg) (Field "optimizerOptions" msg') +outputPartitionGraphs :: HasField "outputPartitionGraphs" msg msg' => Lens msg msg' (Field "outputPartitionGraphs" msg) (Field "outputPartitionGraphs" msg') +outputSlot :: HasField "outputSlot" msg msg' => Lens msg msg' (Field "outputSlot" msg) (Field "outputSlot" msg') +partitionGraphs :: HasField "partitionGraphs" msg msg' => Lens msg msg' (Field "partitionGraphs" msg) (Field "partitionGraphs" msg') +perProcessGpuMemoryFraction :: HasField "perProcessGpuMemoryFraction" msg msg' => Lens msg msg' (Field "perProcessGpuMemoryFraction" msg) (Field "perProcessGpuMemoryFraction" msg') +placePrunedGraph :: HasField "placePrunedGraph" msg msg' => Lens msg msg' (Field "placePrunedGraph" msg) (Field "placePrunedGraph" msg') +placementPeriod :: HasField "placementPeriod" msg msg' => Lens msg msg' (Field "placementPeriod" msg) (Field "placementPeriod" msg') +sessionInterOpThreadPool :: HasField "sessionInterOpThreadPool" msg msg' => Lens msg msg' (Field "sessionInterOpThreadPool" msg) (Field "sessionInterOpThreadPool" msg') +stepStats :: HasField "stepStats" msg msg' => Lens msg msg' (Field "stepStats" msg) (Field "stepStats" msg') +timelineStep :: HasField "timelineStep" msg msg' => Lens msg msg' (Field "timelineStep" msg) (Field "timelineStep" msg') +timeoutInMs :: HasField "timeoutInMs" msg msg' => Lens msg msg' (Field "timeoutInMs" msg) (Field "timeoutInMs" msg') +traceLevel :: HasField "traceLevel" msg msg' => Lens msg msg' (Field "traceLevel" msg) (Field "traceLevel" msg') 
+usePerSessionThreads :: HasField "usePerSessionThreads" msg msg' => Lens msg msg' (Field "usePerSessionThreads" msg) (Field "usePerSessionThreads" msg') +value :: HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') +visibleDeviceList :: HasField "visibleDeviceList" msg msg' => Lens msg msg' (Field "visibleDeviceList" msg) (Field "visibleDeviceList" msg') +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.RunMetadata +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.RunMetadata +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.GPUOptions +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.GPUOptions +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry +instance Data.ProtoLens.Field.HasField "deviceCount" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "intraOpParallelismThreads" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "interOpParallelismThreads" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "usePerSessionThreads" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "sessionInterOpThreadPool" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "placementPeriod" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "deviceFilters" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "gpuOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "maybe'gpuOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto 
Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "allowSoftPlacement" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "logDevicePlacement" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "graphOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "maybe'graphOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "operationTimeoutInMs" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "key" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry +instance Data.ProtoLens.Field.HasField "value" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry +instance Data.ProtoLens.Field.HasField "nodeName" Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch +instance Data.ProtoLens.Field.HasField "outputSlot" Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch +instance Data.ProtoLens.Field.HasField "debugOps" Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch +instance Data.ProtoLens.Field.HasField "debugUrls" Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch +instance Data.ProtoLens.Field.HasField "perProcessGpuMemoryFraction" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions +instance Data.ProtoLens.Field.HasField "allocatorType" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions +instance Data.ProtoLens.Field.HasField "deferredDeletionBytes" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions +instance Data.ProtoLens.Field.HasField "allowGrowth" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions +instance Data.ProtoLens.Field.HasField "visibleDeviceList" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.GPUOptions +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.GPUOptions +instance Data.ProtoLens.Field.HasField "enableRecvScheduling" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions 
Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.ProtoLens.Field.HasField "optimizerOptions" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.ProtoLens.Field.HasField "maybe'optimizerOptions" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.ProtoLens.Field.HasField "buildCostModel" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.ProtoLens.Field.HasField "buildCostModelAfter" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.ProtoLens.Field.HasField "inferShapes" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.ProtoLens.Field.HasField "placePrunedGraph" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.ProtoLens.Field.HasField "enableBfloat16Sendrecv" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.ProtoLens.Field.HasField "timelineStep" Proto.Tensorflow.Core.Protobuf.Config.GraphOptions Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.GraphOptions +instance Data.ProtoLens.Field.HasField "doCommonSubexpressionElimination" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions +instance Data.ProtoLens.Field.HasField "doConstantFolding" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions +instance Data.ProtoLens.Field.HasField "doFunctionInlining" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions +instance Data.ProtoLens.Field.HasField "optLevel" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level +instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level +instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level +instance GHC.Enum.Enum Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level +instance GHC.Enum.Bounded Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level +instance Data.ProtoLens.Field.HasField "stepStats" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata +instance Data.ProtoLens.Field.HasField "maybe'stepStats" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata +instance Data.ProtoLens.Field.HasField "costGraph" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata +instance Data.ProtoLens.Field.HasField "maybe'costGraph" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata +instance Data.ProtoLens.Field.HasField 
"partitionGraphs" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.RunMetadata +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.RunMetadata +instance Data.ProtoLens.Field.HasField "traceLevel" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance Data.ProtoLens.Field.HasField "timeoutInMs" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance Data.ProtoLens.Field.HasField "interOpThreadPool" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance Data.ProtoLens.Field.HasField "debugTensorWatchOpts" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance Data.ProtoLens.Field.HasField "outputPartitionGraphs" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel +instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel +instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel +instance GHC.Enum.Enum Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel +instance GHC.Enum.Bounded Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel +instance Data.ProtoLens.Field.HasField "numThreads" Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.ThreadPoolOptionProto diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html b/docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html new file mode 100644 index 0000000..f8673b4 --- /dev/null +++ b/docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html @@ -0,0 +1,9 @@ +TensorFlow.Queue

    tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

    Safe Haskell: None
    Language: Haskell2010

    TensorFlow.Queue

    Description

    Queues in a TensorFlow graph. Very limited support for now.

    Synopsis

    Documentation

    data Queue2 a b

    A queue carrying tuples. The underlying structure is more versatile and can be made to support arbitrary tuples.
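    For illustration, the two type parameters fix the component types of the tuples the queue carries. The alias below is hypothetical, not part of this module:

        import Data.Int (Int64)
        import TensorFlow.Queue (Queue2)

        -- A hypothetical queue pairing Float features with Int64 labels.
        type FeatureQueue = Queue2 Float Int64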

    makeQueue2

    Arguments

    :: (TensorType a, TensorType b) 
    => Int64

    The upper bound on the number of elements in this queue. Negative numbers mean no limit.

    -> ByteString

    If non-empty, this queue will be shared under the given name across multiple sessions.

    -> Build (Queue2 a b) 

    Creates a new queue with the given capacity and shared name.
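    As a minimal sketch of intended use (createPairQueue is an illustrative name, and the empty shared-name literal relies on OverloadedStrings for the ByteString argument):

        {-# LANGUAGE OverloadedStrings #-}

        import TensorFlow.Build (Build)
        import TensorFlow.Queue (Queue2, makeQueue2)

        -- Sketch: an unshared queue holding at most 128 (Float, Int64) pairs.
        -- The empty ByteString shared name means "not shared across sessions".
        createPairQueue :: Build (Queue2 Float Int64)
        createPairQueue = makeQueue2 128 ""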

    enqueue :: forall a b v1 v2. (TensorType a, TensorType b) => Queue2 a b -> Tensor v1 a -> Tensor v2 b -> Build ControlNode

    Adds the given values to the queue.
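    Because enqueue returns a ControlNode, several enqueues can be folded into a single node with group from TensorFlow.ControlFlow. A sketch (pushTwice is illustrative and assumes the pair instance of Nodes from TensorFlow.Nodes):

        import TensorFlow.Build (Build, ControlNode)
        import TensorFlow.ControlFlow (group)
        import TensorFlow.Queue (Queue2, enqueue)
        import TensorFlow.Tensor (Tensor)
        import TensorFlow.Types (TensorType)

        -- Sketch: fold two enqueues into a single ControlNode so that
        -- both take effect whenever that one node is run.
        pushTwice :: (TensorType a, TensorType b)
                  => Queue2 a b
                  -> Tensor v1 a -> Tensor v2 b
                  -> Tensor v3 a -> Tensor v4 b
                  -> Build ControlNode
        pushTwice q x1 y1 x2 y2 = do
            e1 <- enqueue q x1 y1
            e2 <- enqueue q x2 y2
            group (e1, e2)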

    dequeue

    Arguments

    :: (TensorType a, TensorType b) 
    => Queue2 a b 
    -> Build (Tensor Ref a, Tensor Ref b)

    Dequeued tensors. They are paired in the sense that values appear together, even if they are not consumed together.

    Retrieves the values from the queue.
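    Putting the three operations together, a rough graph-construction sketch (pipePair is an illustrative name; actually running the ControlNode and fetching the dequeued tensors is done with the session API of the companion tensorflow package, which is omitted here):

        {-# LANGUAGE OverloadedStrings #-}

        import TensorFlow.Build (Build, ControlNode)
        import TensorFlow.Queue (makeQueue2, enqueue, dequeue)
        import TensorFlow.Tensor (Ref, Tensor)
        import TensorFlow.Types (TensorType)

        -- Sketch: thread a pair of tensors through a fresh one-slot queue.
        -- The caller must run the ControlNode (the enqueue) before fetching
        -- the dequeued tensors.
        pipePair :: (TensorType a, TensorType b)
                 => Tensor v1 a -> Tensor v2 b
                 -> Build (ControlNode, (Tensor Ref a, Tensor Ref b))
        pipePair x y = do
            q <- makeQueue2 1 ""   -- capacity 1, not shared
            push <- enqueue q x y
            popped <- dequeue q
            return (push, popped)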

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html
new file mode 100644
index 0000000..f9ac0fb
--- /dev/null
+++ b/docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html
@@ -0,0 +1,4 @@
+tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues. (Index)

    tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/frames.html b/docs/haddock/tensorflow-queue-0.1.0.0/frames.html
new file mode 100644
index 0000000..1b4e38d
--- /dev/null
+++ b/docs/haddock/tensorflow-queue-0.1.0.0/frames.html
@@ -0,0 +1,30 @@
+[30 lines of frameset markup, identical to the frames.html files added earlier in this patch]
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-queue-0.1.0.0/haddock-util.js
new file mode 100644
index 0000000..9a6fccf
--- /dev/null
+++ b/docs/haddock/tensorflow-queue-0.1.0.0/haddock-util.js
@@ -0,0 +1,344 @@
+// Haddock JavaScript utilities
+[343 further lines, identical to the haddock-util.js files added earlier in this patch]
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-queue-0.1.0.0/hslogo-16.png
new file mode 100644
index 0000000000000000000000000000000000000000..0ff8579fbd897417b0d6dad6e920f8882138a7c0
GIT binary patch
literal 1684
[binary data omitted]
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-queue-0.1.0.0/index-frames.html
new file mode 100644
--- /dev/null
+++ b/docs/haddock/tensorflow-queue-0.1.0.0/index-frames.html
@@ -0,0 +1,4 @@
+tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/index.html b/docs/haddock/tensorflow-queue-0.1.0.0/index.html
new file mode 100644
index 0000000..a905134
--- /dev/null
+++ b/docs/haddock/tensorflow-queue-0.1.0.0/index.html
@@ -0,0 +1,4 @@
+tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

    tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

    tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

    Please see README.md

    Modules

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html b/docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html
new file mode 100644
index 0000000..2e16ab1
--- /dev/null
+++ b/docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html
@@ -0,0 +1,4 @@
+TensorFlow.Queue

    TensorFlow.Queue

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/minus.gif b/docs/haddock/tensorflow-queue-0.1.0.0/minus.gif
new file mode 100644
index 0000000000000000000000000000000000000000..1deac2fe1a42e35b994f1b855488f392c50f6a89
GIT binary patch
literal 56
[binary data omitted]
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/ocean.css b/docs/haddock/tensorflow-queue-0.1.0.0/ocean.css
new file mode 100644
--- /dev/null
+++ b/docs/haddock/tensorflow-queue-0.1.0.0/ocean.css
@@ -0,0 +1,600 @@
+[600-line stylesheet, identical to the ocean.css files added earlier in this patch]
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/plus.gif b/docs/haddock/tensorflow-queue-0.1.0.0/plus.gif
new file mode 100644
index 0000000000000000000000000000000000000000..2d15c14173d23f664b955cd24f51c82f5f09d91d
GIT binary patch
literal 59
[binary data omitted]
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/src/TensorFlow-Queue.html b/docs/haddock/tensorflow-queue-0.1.0.0/src/TensorFlow-Queue.html
new file mode 100644
index 0000000..df19140
--- /dev/null
+++ b/docs/haddock/tensorflow-queue-0.1.0.0/src/TensorFlow-Queue.html
@@ -0,0 +1,89 @@
+src/TensorFlow/Queue.hs
    +-- Copyright 2016 TensorFlow authors.
    +--
    +-- Licensed under the Apache License, Version 2.0 (the "License");
    +-- you may not use this file except in compliance with the License.
    +-- You may obtain a copy of the License at
    +--
    +--     http://www.apache.org/licenses/LICENSE-2.0
    +--
    +-- Unless required by applicable law or agreed to in writing, software
    +-- distributed under the License is distributed on an "AS IS" BASIS,
    +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +-- See the License for the specific language governing permissions and
    +-- limitations under the License.
    +
    +{-# LANGUAGE OverloadedStrings #-}
    +{-# LANGUAGE ScopedTypeVariables #-}
    +
    +-- | Queues in a TensorFlow graph. Very limited support for now.
    +module TensorFlow.Queue (Queue2, makeQueue2, enqueue, dequeue) where
    +
    +import Data.ByteString (ByteString)
    +import Data.Int (Int64)
    +import Lens.Family2 ((.~), (&))
    +import TensorFlow.Build (ControlNode, Build, addInitializer, opAttr, opDef)
    +import TensorFlow.BuildOp (buildOp)
    +import TensorFlow.ControlFlow (group)
    +import TensorFlow.Tensor (Ref, Tensor)
    +import TensorFlow.Types (TensorType, tensorType)
    +
    +-- | A queue carrying tuples. The underlying structure is more
    +-- versatile and can be made to support arbitrary tuples.
    +data Queue2 a b = Queue2 { handle :: Handle }
    +
    +type Handle = Tensor Ref ByteString
    +
    +-- | Adds the given values to the queue.
    +enqueue :: forall a b v1 v2. (TensorType a, TensorType b)
    +           => Queue2 a b
    +           -> Tensor v1 a
    +           -> Tensor v2 b
    +           -> Build ControlNode
    +enqueue q =
    +    buildOp (opDef "QueueEnqueue"
    +             & opAttr "Tcomponents" .~ [ tensorType (undefined :: a)
    +                                       , tensorType (undefined :: b)])
    +    (handle q)
    +
    +-- | Retrieves the values from the queue.
    +dequeue :: forall a b . (TensorType a, TensorType b)
    +           => Queue2 a b
    +           -> Build (Tensor Ref a, Tensor Ref b)
    +           -- ^ Dequeued tensors. They are paired in the sense
    +           -- that values appear together, even if they are
    +           -- not consumed together.
    +dequeue q =
    +    buildOp (opDef "QueueDequeue"
    +             & opAttr "component_types" .~ [ tensorType (undefined :: a)
    +                                           , tensorType (undefined :: b)])
    +    (handle q)
    +
    +-- | Creates a new queue with the given capacity and shared name.
    +makeQueue2 :: forall a b . (TensorType a, TensorType b)
    +              => Int64  -- ^ The upper bound on the number of elements in
    +                        --  this queue. Negative numbers mean no limit.
    +              -> ByteString -- ^ If non-empty, this queue will be shared
    +                            -- under the given name across multiple sessions.
    +              -> Build (Queue2 a b)
    +makeQueue2 capacity sharedName = do
    +    q <- buildOp (opDef "FIFOQueue"
    +                     & opAttr "component_types" .~ [ tensorType (undefined :: a)
    +                                                   , tensorType (undefined :: b)]
    +                     & opAttr "shared_name" .~ sharedName
    +                     & opAttr "capacity" .~ capacity
    +                    )
    +    group q >>= addInitializer
    +    return (Queue2 q)
    +
    +-- TODO(gnezdo): Figure out the closing story for queues.
    +
+
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-queue-0.1.0.0/src/hscolour.css
new file mode 100644
index 0000000..c15919e
--- /dev/null
+++ b/docs/haddock/tensorflow-queue-0.1.0.0/src/hscolour.css
@@ -0,0 +1,5 @@
+.hs-keyglyph, .hs-layout {color: red;}
+.hs-keyword {color: blue;}
+.hs-comment, .hs-comment a {color: green;}
+.hs-str, .hs-chr {color: teal;}
+.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {}
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-queue-0.1.0.0/synopsis.png
new file mode 100644
index 0000000000000000000000000000000000000000..85fb86ec84907bcc86531dc82871948ff4d471fa
GIT binary patch
literal 11327
[binary data omitted]
diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt b/docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt
new file mode 100644
index 0000000..2db224c
--- /dev/null
+++ b/docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt
@@ -0,0 +1,26 @@
+-- Hoogle documentation, generated by Haddock
+-- See Hoogle, http://www.haskell.org/hoogle/
+
+
+-- | Basic access to TensorFlow queues.
+--
+-- Please see README.md
+@package tensorflow-queue
+@version 0.1.0.0
+
+
+-- | Queues in a TensorFlow graph. Very limited support for now.
+module TensorFlow.Queue
+
+-- | A queue carrying tuples. The underlying structure is more versatile
+-- and can be made to support arbitrary tuples.
+data Queue2 a b
+
+-- | Creates a new queue with the given capacity and shared name.
+makeQueue2 :: (TensorType a, TensorType b) => Int64 -> ByteString -> Build (Queue2 a b)
+
+-- | Adds the given values to the queue.
+enqueue :: (TensorType a, TensorType b) => Queue2 a b -> Tensor v1 a -> Tensor v2 b -> Build ControlNode
+
+-- | Retrieves the values from the queue.
+dequeue :: (TensorType a, TensorType b) => Queue2 a b -> Build (Tensor Ref a, Tensor Ref b)
diff --git a/tools/haddock.sh b/tools/haddock.sh
index f31248d..5f1b7ed 100755
--- a/tools/haddock.sh
+++ b/tools/haddock.sh
@@ -10,8 +10,9 @@ STACK="stack --docker --docker-image=$IMAGE_NAME"
 $STACK haddock --no-haddock-deps tensorflow*
 DOC_ROOT=$($STACK path --local-doc-root)
 DOCS=docs/haddock
-git rm -r $DOCS
+git rm -fr $DOCS
 mkdir -p $DOCS
 cp $DOC_ROOT/{*.html,*js,*.png,*.gif,*.css} $DOCS
 cp -a $DOC_ROOT/tensorflow* $DOCS
+rm -f $DOCS/*/*.haddock
 git add $DOCS