<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<!-- Generated by HsColour, http://code.haskell.org/~malcolm/hscolour/ -->
<title>.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/TensorFlow/GenOps/Core.hs</title>
<link type='text/css' rel='stylesheet' href='hscolour.css' />
</head>
<body>
<pre><a name="line-1"></a><span class='hs-comment'>{-# LANGUAGE ConstraintKinds #-}</span>
<a name="line-2"></a><span class='hs-comment'>{-# LANGUAGE DataKinds #-}</span>
<a name="line-3"></a><span class='hs-comment'>{-# LANGUAGE FlexibleInstances #-}</span>
<a name="line-4"></a><span class='hs-comment'>{-# LANGUAGE OverloadedStrings #-}</span>
<a name="line-5"></a><span class='hs-comment'>{-# LANGUAGE ScopedTypeVariables #-}</span>
<a name="line-6"></a><span class='hs-comment'>{-# OPTIONS_GHC -fno-warn-name-shadowing #-}</span>
<a name="line-7"></a><span class='hs-comment'>{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}</span>
<a name="line-8"></a><span class='hs-keyword'>module</span> <span class='hs-conid'>TensorFlow</span><span class='hs-varop'>.</span><span class='hs-conid'>GenOps</span><span class='hs-varop'>.</span><span class='hs-conid'>Core</span> <span class='hs-keyword'>where</span>
<a name="line-9"></a>
<a name="line-10"></a><span class='hs-keyword'>import</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-layout'>(</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span>
<a name="line-11"></a><span class='hs-keyword'>import</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-layout'>(</span><span class='hs-conid'>Complex</span><span class='hs-layout'>)</span>
<a name="line-12"></a><span class='hs-keyword'>import</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span> <span class='hs-layout'>(</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Int64</span><span class='hs-layout'>)</span>
<a name="line-13"></a><span class='hs-keyword'>import</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span> <span class='hs-layout'>(</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Word16</span><span class='hs-layout'>)</span>
<a name="line-14"></a><span class='hs-keyword'>import</span> <span class='hs-conid'>Lens</span><span class='hs-varop'>.</span><span class='hs-conid'>Family2</span> <span class='hs-layout'>(</span><span class='hs-layout'>(</span><span class='hs-varop'>.~</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span> <span class='hs-layout'>(</span><span class='hs-varop'>&amp;</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15"></a><span class='hs-keyword'>import</span> <span class='hs-conid'>TensorFlow</span><span class='hs-varop'>.</span><span class='hs-conid'>Build</span>
<a name="line-16"></a><span class='hs-keyword'>import</span> <span class='hs-conid'>TensorFlow</span><span class='hs-varop'>.</span><span class='hs-conid'>BuildOp</span>
<a name="line-17"></a><span class='hs-keyword'>import</span> <span class='hs-conid'>TensorFlow</span><span class='hs-varop'>.</span><span class='hs-conid'>Output</span> <span class='hs-layout'>(</span><span class='hs-conid'>ResourceHandle</span><span class='hs-layout'>)</span>
<a name="line-18"></a><span class='hs-keyword'>import</span> <span class='hs-conid'>TensorFlow</span><span class='hs-varop'>.</span><span class='hs-conid'>Tensor</span>
<a name="line-19"></a><span class='hs-keyword'>import</span> <span class='hs-conid'>TensorFlow</span><span class='hs-varop'>.</span><span class='hs-conid'>Types</span>
<a name="line-20"></a>
<a name="line-21"></a><span class='hs-comment'>-- | Receives the named tensor from send_device on recv_device.</span>
<a name="line-22"></a><span class='hs-comment'>--</span>
<a name="line-23"></a><span class='hs-comment'>-- _HostRecv requires its input on host memory whereas _Recv requires its</span>
<a name="line-24"></a><span class='hs-comment'>-- input on device memory.</span>
<a name="line-25"></a><span class='hs-sel'>_HostRecv</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>tensor_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tensor_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26"></a>             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __send_device_incarnation__: The current incarnation of send_device.</span>
<a name="line-27"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tensor_type</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __tensor__: The tensor to receive.</span>
<a name="line-28"></a><span class='hs-sel'>_HostRecv</span> <span class='hs-varid'>send_device_incarnation</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-29"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"_HostRecv"</span>
<a name="line-30"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"tensor_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tensor_type</span><span class='hs-layout'>)</span>
<a name="line-31"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"send_device_incarnation"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>send_device_incarnation</span><span class='hs-layout'>)</span>
<a name="line-32"></a>        
<a name="line-33"></a><span class='hs-comment'>{-
<a name="line-34"></a>attr { name: "tensor_type" type: "type" }
<a name="line-35"></a>attr {
<a name="line-36"></a>  description: "The name of the tensor to receive."
<a name="line-37"></a>  name: "tensor_name"
<a name="line-38"></a>  type: "string"
<a name="line-39"></a>}
<a name="line-40"></a>attr {
<a name="line-41"></a>  description: "The name of the device sending the tensor."
<a name="line-42"></a>  name: "send_device"
<a name="line-43"></a>  type: "string"
<a name="line-44"></a>}
<a name="line-45"></a>attr {
<a name="line-46"></a>  description: "The current incarnation of send_device."
<a name="line-47"></a>  name: "send_device_incarnation"
<a name="line-48"></a>  type: "int"
<a name="line-49"></a>}
<a name="line-50"></a>attr {
<a name="line-51"></a>  description: "The name of the device receiving the tensor."
<a name="line-52"></a>  name: "recv_device"
<a name="line-53"></a>  type: "string"
<a name="line-54"></a>}
<a name="line-55"></a>attr {
<a name="line-56"></a>  default_value { b: false }
<a name="line-57"></a>  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
<a name="line-58"></a>  name: "client_terminated"
<a name="line-59"></a>  type: "bool"
<a name="line-60"></a>}
<a name="line-61"></a>output_arg {
<a name="line-62"></a>  description: "The tensor to receive."
<a name="line-63"></a>  name: "tensor"
<a name="line-64"></a>  type_attr: "tensor_type"
<a name="line-65"></a>}
<a name="line-66"></a>-}</span>
<a name="line-67"></a>
<a name="line-68"></a><span class='hs-comment'>-- | Sends the named tensor from send_device to recv_device.</span>
<a name="line-69"></a><span class='hs-comment'>--</span>
<a name="line-70"></a><span class='hs-comment'>-- _HostSend requires its input on host memory whereas _Send requires its</span>
<a name="line-71"></a><span class='hs-comment'>-- input on device memory.</span>
<a name="line-72"></a><span class='hs-sel'>_HostSend</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-73"></a>             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __send_device_incarnation__: The current incarnation of send_device.</span>
<a name="line-74"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __tensor__: The tensor to send.</span>
<a name="line-75"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-76"></a><span class='hs-sel'>_HostSend</span> <span class='hs-varid'>send_device_incarnation</span> <span class='hs-varid'>tensor</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-77"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"_HostSend"</span>
<a name="line-78"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-79"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"send_device_incarnation"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>send_device_incarnation</span><span class='hs-layout'>)</span>
<a name="line-80"></a>        <span class='hs-varid'>tensor</span>
<a name="line-81"></a><span class='hs-comment'>{-
<a name="line-82"></a>attr { name: "T" type: "type" }
<a name="line-83"></a>attr {
<a name="line-84"></a>  description: "The name of the tensor to send."
<a name="line-85"></a>  name: "tensor_name"
<a name="line-86"></a>  type: "string"
<a name="line-87"></a>}
<a name="line-88"></a>attr {
<a name="line-89"></a>  description: "The name of the device sending the tensor."
<a name="line-90"></a>  name: "send_device"
<a name="line-91"></a>  type: "string"
<a name="line-92"></a>}
<a name="line-93"></a>attr {
<a name="line-94"></a>  description: "The current incarnation of send_device."
<a name="line-95"></a>  name: "send_device_incarnation"
<a name="line-96"></a>  type: "int"
<a name="line-97"></a>}
<a name="line-98"></a>attr {
<a name="line-99"></a>  description: "The name of the device receiving the tensor."
<a name="line-100"></a>  name: "recv_device"
<a name="line-101"></a>  type: "string"
<a name="line-102"></a>}
<a name="line-103"></a>attr {
<a name="line-104"></a>  default_value { b: false }
<a name="line-105"></a>  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
<a name="line-106"></a>  name: "client_terminated"
<a name="line-107"></a>  type: "bool"
<a name="line-108"></a>}
<a name="line-109"></a>input_arg {
<a name="line-110"></a>  description: "The tensor to send." name: "tensor" type_attr: "T"
<a name="line-111"></a>}
<a name="line-112"></a>-}</span>
<a name="line-113"></a>
<a name="line-114"></a><span class='hs-comment'>-- | Receives the named tensor from send_device on recv_device.</span>
<a name="line-115"></a>
<a name="line-116"></a><span class='hs-sel'>_Recv</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>tensor_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tensor_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-117"></a>         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __send_device_incarnation__: The current incarnation of send_device.</span>
<a name="line-118"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tensor_type</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __tensor__: The tensor to receive.</span>
<a name="line-119"></a><span class='hs-sel'>_Recv</span> <span class='hs-varid'>send_device_incarnation</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-120"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"_Recv"</span>
<a name="line-121"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"tensor_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tensor_type</span><span class='hs-layout'>)</span>
<a name="line-122"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"send_device_incarnation"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>send_device_incarnation</span><span class='hs-layout'>)</span>
<a name="line-123"></a>        
<a name="line-124"></a><span class='hs-comment'>{-
<a name="line-125"></a>attr { name: "tensor_type" type: "type" }
<a name="line-126"></a>attr {
<a name="line-127"></a>  description: "The name of the tensor to receive."
<a name="line-128"></a>  name: "tensor_name"
<a name="line-129"></a>  type: "string"
<a name="line-130"></a>}
<a name="line-131"></a>attr {
<a name="line-132"></a>  description: "The name of the device sending the tensor."
<a name="line-133"></a>  name: "send_device"
<a name="line-134"></a>  type: "string"
<a name="line-135"></a>}
<a name="line-136"></a>attr {
<a name="line-137"></a>  description: "The current incarnation of send_device."
<a name="line-138"></a>  name: "send_device_incarnation"
<a name="line-139"></a>  type: "int"
<a name="line-140"></a>}
<a name="line-141"></a>attr {
<a name="line-142"></a>  description: "The name of the device receiving the tensor."
<a name="line-143"></a>  name: "recv_device"
<a name="line-144"></a>  type: "string"
<a name="line-145"></a>}
<a name="line-146"></a>attr {
<a name="line-147"></a>  default_value { b: false }
<a name="line-148"></a>  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
<a name="line-149"></a>  name: "client_terminated"
<a name="line-150"></a>  type: "bool"
<a name="line-151"></a>}
<a name="line-152"></a>output_arg {
<a name="line-153"></a>  description: "The tensor to receive."
<a name="line-154"></a>  name: "tensor"
<a name="line-155"></a>  type_attr: "tensor_type"
<a name="line-156"></a>}
<a name="line-157"></a>-}</span>
<a name="line-158"></a>
<a name="line-159"></a><span class='hs-comment'>-- | Sends the named tensor from send_device to recv_device.</span>
<a name="line-160"></a>
<a name="line-161"></a><span class='hs-sel'>_Send</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-162"></a>         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __send_device_incarnation__: The current incarnation of send_device.</span>
<a name="line-163"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __tensor__: The tensor to send.</span>
<a name="line-164"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-165"></a><span class='hs-sel'>_Send</span> <span class='hs-varid'>send_device_incarnation</span> <span class='hs-varid'>tensor</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-166"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"_Send"</span>
<a name="line-167"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-168"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"send_device_incarnation"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>send_device_incarnation</span><span class='hs-layout'>)</span>
<a name="line-169"></a>        <span class='hs-varid'>tensor</span>
<a name="line-170"></a><span class='hs-comment'>{-
<a name="line-171"></a>attr { name: "T" type: "type" }
<a name="line-172"></a>attr {
<a name="line-173"></a>  description: "The name of the tensor to send."
<a name="line-174"></a>  name: "tensor_name"
<a name="line-175"></a>  type: "string"
<a name="line-176"></a>}
<a name="line-177"></a>attr {
<a name="line-178"></a>  description: "The name of the device sending the tensor."
<a name="line-179"></a>  name: "send_device"
<a name="line-180"></a>  type: "string"
<a name="line-181"></a>}
<a name="line-182"></a>attr {
<a name="line-183"></a>  description: "The current incarnation of send_device."
<a name="line-184"></a>  name: "send_device_incarnation"
<a name="line-185"></a>  type: "int"
<a name="line-186"></a>}
<a name="line-187"></a>attr {
<a name="line-188"></a>  description: "The name of the device receiving the tensor."
<a name="line-189"></a>  name: "recv_device"
<a name="line-190"></a>  type: "string"
<a name="line-191"></a>}
<a name="line-192"></a>attr {
<a name="line-193"></a>  default_value { b: false }
<a name="line-194"></a>  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
<a name="line-195"></a>  name: "client_terminated"
<a name="line-196"></a>  type: "bool"
<a name="line-197"></a>}
<a name="line-198"></a>input_arg {
<a name="line-199"></a>  description: "The tensor to send." name: "tensor" type_attr: "T"
<a name="line-200"></a>}
<a name="line-201"></a>-}</span>
<a name="line-202"></a>
<a name="line-203"></a><span class='hs-comment'>-- | Does nothing. Only useful as a placeholder for control edges.</span>
<a name="line-204"></a>
<a name="line-205"></a><a name="noOp"></a><span class='hs-definition'>noOp</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>ControlNode</span>
<a name="line-206"></a><span class='hs-definition'>noOp</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-207"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"NoOp"</span><span class='hs-layout'>)</span>
<a name="line-208"></a>        
<a name="line-209"></a><span class='hs-comment'>{-
<a name="line-210"></a>
<a name="line-211"></a>-}</span>
<a name="line-212"></a>
<a name="line-213"></a><span class='hs-comment'>-- | A graph node which represents a return value of a function.</span>
<a name="line-214"></a>
<a name="line-215"></a><span class='hs-sel'>_Retval</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-216"></a>           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __index__: This return value is the index-th return value of the function.</span>
<a name="line-217"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The return value.</span>
<a name="line-218"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-219"></a><span class='hs-sel'>_Retval</span> <span class='hs-varid'>index</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-220"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"_Retval"</span>
<a name="line-221"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-222"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"index"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>index</span><span class='hs-layout'>)</span>
<a name="line-223"></a>        <span class='hs-varid'>input</span>
<a name="line-224"></a><span class='hs-comment'>{-
<a name="line-225"></a>attr { name: "T" type: "type" }
<a name="line-226"></a>attr {
<a name="line-227"></a>  description: "This return value is the index-th return value of the function."
<a name="line-228"></a>  has_minimum: true
<a name="line-229"></a>  name: "index"
<a name="line-230"></a>  type: "int"
<a name="line-231"></a>}
<a name="line-232"></a>input_arg {
<a name="line-233"></a>  description: "The return value." name: "input" type_attr: "T"
<a name="line-234"></a>}
<a name="line-235"></a>-}</span>
<a name="line-236"></a>
<a name="line-237"></a><span class='hs-comment'>-- | A graph node which represents an argument to a function.</span>
<a name="line-238"></a>
<a name="line-239"></a><span class='hs-sel'>_Arg</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-240"></a>        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __index__: This argument is the index-th argument of the function.</span>
<a name="line-241"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: The argument.</span>
<a name="line-242"></a><span class='hs-sel'>_Arg</span> <span class='hs-varid'>index</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-243"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"_Arg"</span>
<a name="line-244"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-245"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"index"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>index</span><span class='hs-layout'>)</span>
<a name="line-246"></a>        
<a name="line-247"></a><span class='hs-comment'>{-
<a name="line-248"></a>attr { name: "T" type: "type" }
<a name="line-249"></a>attr {
<a name="line-250"></a>  description: "This argument is the index-th argument of the function."
<a name="line-251"></a>  has_minimum: true
<a name="line-252"></a>  name: "index"
<a name="line-253"></a>  type: "int"
<a name="line-254"></a>}
<a name="line-255"></a>output_arg {
<a name="line-256"></a>  description: "The argument." name: "output" type_attr: "T"
<a name="line-257"></a>}
<a name="line-258"></a>-}</span>
<a name="line-259"></a>
<a name="line-260"></a><a name="quantizedBatchNormWithGlobalNormalization"></a><span class='hs-comment'>-- | Quantized Batch normalization.</span>
<a name="line-261"></a><span class='hs-comment'>--</span>
<a name="line-262"></a><span class='hs-comment'>-- This op is deprecated and will be removed in the future. Prefer</span>
<a name="line-263"></a><span class='hs-comment'>-- `tf.nn.batch_normalization`.</span>
<a name="line-264"></a><span class='hs-definition'>quantizedBatchNormWithGlobalNormalization</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>v9</span>
<a name="line-265"></a>                                             <span class='hs-varid'>v10</span> <span class='hs-varid'>v11</span> <span class='hs-varid'>v12</span> <span class='hs-varid'>v13</span> <span class='hs-varid'>v14</span> <span class='hs-varid'>v15</span> <span class='hs-varid'>tinput</span>
<a name="line-266"></a>                                             <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-267"></a>                                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-268"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-269"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-270"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-271"></a>                                                         <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-272"></a>                                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-273"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-274"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-275"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-276"></a>                                             <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor</span>
<a name="line-277"></a>                                                  <span class='hs-comment'>-- needs to be multiplied with gamma.</span>
<a name="line-278"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __variance_epsilon__: A small float number to avoid dividing by 0.</span>
<a name="line-279"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __t__: A 4D input Tensor.</span>
<a name="line-280"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __t_min__: The value represented by the lowest quantized input.</span>
<a name="line-281"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __t_max__: The value represented by the highest quantized input.</span>
<a name="line-282"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.</span>
<a name="line-283"></a>                                                                 <span class='hs-comment'>-- This is the first output from tf.nn.moments,</span>
<a name="line-284"></a>                                                                 <span class='hs-comment'>-- or a saved moving average thereof.</span>
<a name="line-285"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __m_min__: The value represented by the lowest quantized mean.</span>
<a name="line-286"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __m_max__: The value represented by the highest quantized mean.</span>
<a name="line-287"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.</span>
<a name="line-288"></a>                                                                 <span class='hs-comment'>-- This is the second output from tf.nn.moments,</span>
<a name="line-289"></a>                                                                 <span class='hs-comment'>-- or a saved moving average thereof.</span>
<a name="line-290"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __v_min__: The value represented by the lowest quantized variance.</span>
<a name="line-291"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v9</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __v_max__: The value represented by the highest quantized variance.</span>
<a name="line-292"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v10</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t.</span>
<a name="line-293"></a>                                                                  <span class='hs-comment'>-- An offset to be added to the normalized tensor.</span>
<a name="line-294"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v11</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __beta_min__: The value represented by the lowest quantized offset.</span>
<a name="line-295"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v12</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __beta_max__: The value represented by the highest quantized offset.</span>
<a name="line-296"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v13</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.</span>
<a name="line-297"></a>                                                                  <span class='hs-comment'>-- If "scale_after_normalization" is true, this tensor will be multiplied</span>
<a name="line-298"></a>                                                                  <span class='hs-comment'>-- with the normalized tensor.</span>
<a name="line-299"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v14</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __gamma_min__: The value represented by the lowest quantized gamma.</span>
<a name="line-300"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v15</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __gamma_max__: The value represented by the highest quantized gamma.</span>
<a name="line-301"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-302"></a>                                                 <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-303"></a>                                                 <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-304"></a>                                             <span class='hs-comment'>-- ^ (__result__, __result_min__, __result_max__)</span>
<a name="line-305"></a>                                             <span class='hs-comment'>--</span>
<a name="line-306"></a>                                             <span class='hs-comment'>-- * __result__</span>
<a name="line-307"></a>                                             <span class='hs-comment'>--</span>
<a name="line-308"></a>                                             <span class='hs-comment'>-- * __result_min__</span>
<a name="line-309"></a>                                             <span class='hs-comment'>--</span>
<a name="line-310"></a>                                             <span class='hs-comment'>-- * __result_max__</span>
<a name="line-311"></a><span class='hs-definition'>quantizedBatchNormWithGlobalNormalization</span> <span class='hs-varid'>scale_after_normalization</span>
<a name="line-312"></a>                                          <span class='hs-varid'>variance_epsilon</span> <span class='hs-varid'>t</span> <span class='hs-varid'>t_min</span> <span class='hs-varid'>t_max</span> <span class='hs-varid'>m</span> <span class='hs-varid'>m_min</span>
<a name="line-313"></a>                                          <span class='hs-varid'>m_max</span> <span class='hs-varid'>v</span> <span class='hs-varid'>v_min</span> <span class='hs-varid'>v_max</span> <span class='hs-varid'>beta</span> <span class='hs-varid'>beta_min</span>
<a name="line-314"></a>                                          <span class='hs-varid'>beta_max</span> <span class='hs-varid'>gamma</span> <span class='hs-varid'>gamma_min</span>
<a name="line-315"></a>                                          <span class='hs-varid'>gamma_max</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-316"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedBatchNormWithGlobalNormalization"</span>
<a name="line-317"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tinput"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>)</span>
<a name="line-318"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span>
<a name="line-319"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"scale_after_normalization"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>scale_after_normalization</span>
<a name="line-320"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"variance_epsilon"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>variance_epsilon</span><span class='hs-layout'>)</span>
<a name="line-321"></a>        <span class='hs-varid'>t</span> <span class='hs-varid'>t_min</span> <span class='hs-varid'>t_max</span> <span class='hs-varid'>m</span> <span class='hs-varid'>m_min</span> <span class='hs-varid'>m_max</span> <span class='hs-varid'>v</span> <span class='hs-varid'>v_min</span> <span class='hs-varid'>v_max</span> <span class='hs-varid'>beta</span> <span class='hs-varid'>beta_min</span> <span class='hs-varid'>beta_max</span> <span class='hs-varid'>gamma</span>
<a name="line-322"></a>        <span class='hs-varid'>gamma_min</span> <span class='hs-varid'>gamma_max</span>
<a name="line-323"></a><span class='hs-comment'>{-
<a name="line-324"></a>attr {
<a name="line-325"></a>  allowed_values {
<a name="line-326"></a>    list {
<a name="line-327"></a>      type: DT_QINT8
<a name="line-328"></a>      type: DT_QUINT8
<a name="line-329"></a>      type: DT_QINT16
<a name="line-330"></a>      type: DT_QUINT16
<a name="line-331"></a>      type: DT_QINT32
<a name="line-332"></a>    }
<a name="line-333"></a>  }
<a name="line-334"></a>  name: "Tinput"
<a name="line-335"></a>  type: "type"
<a name="line-336"></a>}
<a name="line-337"></a>attr {
<a name="line-338"></a>  allowed_values {
<a name="line-339"></a>    list {
<a name="line-340"></a>      type: DT_QINT8
<a name="line-341"></a>      type: DT_QUINT8
<a name="line-342"></a>      type: DT_QINT16
<a name="line-343"></a>      type: DT_QUINT16
<a name="line-344"></a>      type: DT_QINT32
<a name="line-345"></a>    }
<a name="line-346"></a>  }
<a name="line-347"></a>  name: "out_type"
<a name="line-348"></a>  type: "type"
<a name="line-349"></a>}
<a name="line-350"></a>attr {
<a name="line-351"></a>  description: "A small float number to avoid dividing by 0."
<a name="line-352"></a>  name: "variance_epsilon"
<a name="line-353"></a>  type: "float"
<a name="line-354"></a>}
<a name="line-355"></a>attr {
<a name="line-356"></a>  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
<a name="line-357"></a>  name: "scale_after_normalization"
<a name="line-358"></a>  type: "bool"
<a name="line-359"></a>}
<a name="line-360"></a>input_arg {
<a name="line-361"></a>  description: "A 4D input Tensor." name: "t" type_attr: "Tinput"
<a name="line-362"></a>}
<a name="line-363"></a>input_arg {
<a name="line-364"></a>  description: "The value represented by the lowest quantized input."
<a name="line-365"></a>  name: "t_min"
<a name="line-366"></a>  type: DT_FLOAT
<a name="line-367"></a>}
<a name="line-368"></a>input_arg {
<a name="line-369"></a>  description: "The value represented by the highest quantized input."
<a name="line-370"></a>  name: "t_max"
<a name="line-371"></a>  type: DT_FLOAT
<a name="line-372"></a>}
<a name="line-373"></a>input_arg {
<a name="line-374"></a>  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
<a name="line-375"></a>  name: "m"
<a name="line-376"></a>  type_attr: "Tinput"
<a name="line-377"></a>}
<a name="line-378"></a>input_arg {
<a name="line-379"></a>  description: "The value represented by the lowest quantized mean."
<a name="line-380"></a>  name: "m_min"
<a name="line-381"></a>  type: DT_FLOAT
<a name="line-382"></a>}
<a name="line-383"></a>input_arg {
<a name="line-384"></a>  description: "The value represented by the highest quantized mean."
<a name="line-385"></a>  name: "m_max"
<a name="line-386"></a>  type: DT_FLOAT
<a name="line-387"></a>}
<a name="line-388"></a>input_arg {
<a name="line-389"></a>  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
<a name="line-390"></a>  name: "v"
<a name="line-391"></a>  type_attr: "Tinput"
<a name="line-392"></a>}
<a name="line-393"></a>input_arg {
<a name="line-394"></a>  description: "The value represented by the lowest quantized variance."
<a name="line-395"></a>  name: "v_min"
<a name="line-396"></a>  type: DT_FLOAT
<a name="line-397"></a>}
<a name="line-398"></a>input_arg {
<a name="line-399"></a>  description: "The value represented by the highest quantized variance."
<a name="line-400"></a>  name: "v_max"
<a name="line-401"></a>  type: DT_FLOAT
<a name="line-402"></a>}
<a name="line-403"></a>input_arg {
<a name="line-404"></a>  description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor."
<a name="line-405"></a>  name: "beta"
<a name="line-406"></a>  type_attr: "Tinput"
<a name="line-407"></a>}
<a name="line-408"></a>input_arg {
<a name="line-409"></a>  description: "The value represented by the lowest quantized offset."
<a name="line-410"></a>  name: "beta_min"
<a name="line-411"></a>  type: DT_FLOAT
<a name="line-412"></a>}
<a name="line-413"></a>input_arg {
<a name="line-414"></a>  description: "The value represented by the highest quantized offset."
<a name="line-415"></a>  name: "beta_max"
<a name="line-416"></a>  type: DT_FLOAT
<a name="line-417"></a>}
<a name="line-418"></a>input_arg {
<a name="line-419"></a>  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor."
<a name="line-420"></a>  name: "gamma"
<a name="line-421"></a>  type_attr: "Tinput"
<a name="line-422"></a>}
<a name="line-423"></a>input_arg {
<a name="line-424"></a>  description: "The value represented by the lowest quantized gamma."
<a name="line-425"></a>  name: "gamma_min"
<a name="line-426"></a>  type: DT_FLOAT
<a name="line-427"></a>}
<a name="line-428"></a>input_arg {
<a name="line-429"></a>  description: "The value represented by the highest quantized gamma."
<a name="line-430"></a>  name: "gamma_max"
<a name="line-431"></a>  type: DT_FLOAT
<a name="line-432"></a>}
<a name="line-433"></a>output_arg { name: "result" type_attr: "out_type" }
<a name="line-434"></a>output_arg { name: "result_min" type: DT_FLOAT }
<a name="line-435"></a>output_arg { name: "result_max" type: DT_FLOAT }
<a name="line-436"></a>-}</span>
<a name="line-437"></a>
<a name="line-438"></a><span class='hs-comment'>-- | Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`</span>
<a name="line-439"></a>
<a name="line-440"></a><a name="quantizedRelu6"></a><span class='hs-definition'>quantizedRelu6</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tinput</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-441"></a>                                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-442"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-443"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-444"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-445"></a>                                                     <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-446"></a>                                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-447"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-448"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-449"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-450"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __features__</span>
<a name="line-451"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_features__: The float value that the lowest quantized value represents.</span>
<a name="line-452"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_features__: The float value that the highest quantized value represents.</span>
<a name="line-453"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-454"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-455"></a>                  <span class='hs-comment'>-- ^ (__activations__, __min_activations__, __max_activations__)</span>
<a name="line-456"></a>                  <span class='hs-comment'>--</span>
<a name="line-457"></a>                  <span class='hs-comment'>-- * __activations__: Has the same output shape as "features".</span>
<a name="line-458"></a>                  <span class='hs-comment'>--</span>
<a name="line-459"></a>                  <span class='hs-comment'>-- * __min_activations__: The float value that the lowest quantized value represents.</span>
<a name="line-460"></a>                  <span class='hs-comment'>--</span>
<a name="line-461"></a>                  <span class='hs-comment'>-- * __max_activations__: The float value that the highest quantized value represents.</span>
<a name="line-462"></a><span class='hs-definition'>quantizedRelu6</span> <span class='hs-varid'>features</span> <span class='hs-varid'>min_features</span> <span class='hs-varid'>max_features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-463"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedRelu6"</span>
<a name="line-464"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tinput"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>)</span>
<a name="line-465"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-466"></a>        <span class='hs-varid'>features</span> <span class='hs-varid'>min_features</span> <span class='hs-varid'>max_features</span>
<a name="line-467"></a><span class='hs-comment'>{-
<a name="line-468"></a>attr {
<a name="line-469"></a>  allowed_values {
<a name="line-470"></a>    list {
<a name="line-471"></a>      type: DT_QINT8
<a name="line-472"></a>      type: DT_QUINT8
<a name="line-473"></a>      type: DT_QINT16
<a name="line-474"></a>      type: DT_QUINT16
<a name="line-475"></a>      type: DT_QINT32
<a name="line-476"></a>    }
<a name="line-477"></a>  }
<a name="line-478"></a>  name: "Tinput"
<a name="line-479"></a>  type: "type"
<a name="line-480"></a>}
<a name="line-481"></a>attr {
<a name="line-482"></a>  allowed_values {
<a name="line-483"></a>    list {
<a name="line-484"></a>      type: DT_QINT8
<a name="line-485"></a>      type: DT_QUINT8
<a name="line-486"></a>      type: DT_QINT16
<a name="line-487"></a>      type: DT_QUINT16
<a name="line-488"></a>      type: DT_QINT32
<a name="line-489"></a>    }
<a name="line-490"></a>  }
<a name="line-491"></a>  default_value { type: DT_QUINT8 }
<a name="line-492"></a>  name: "out_type"
<a name="line-493"></a>  type: "type"
<a name="line-494"></a>}
<a name="line-495"></a>input_arg { name: "features" type_attr: "Tinput" }
<a name="line-496"></a>input_arg {
<a name="line-497"></a>  description: "The float value that the lowest quantized value represents."
<a name="line-498"></a>  name: "min_features"
<a name="line-499"></a>  type: DT_FLOAT
<a name="line-500"></a>}
<a name="line-501"></a>input_arg {
<a name="line-502"></a>  description: "The float value that the highest quantized value represents."
<a name="line-503"></a>  name: "max_features"
<a name="line-504"></a>  type: DT_FLOAT
<a name="line-505"></a>}
<a name="line-506"></a>output_arg {
<a name="line-507"></a>  description: "Has the same output shape as \"features\"."
<a name="line-508"></a>  name: "activations"
<a name="line-509"></a>  type_attr: "out_type"
<a name="line-510"></a>}
<a name="line-511"></a>output_arg {
<a name="line-512"></a>  description: "The float value that the lowest quantized value represents."
<a name="line-513"></a>  name: "min_activations"
<a name="line-514"></a>  type: DT_FLOAT
<a name="line-515"></a>}
<a name="line-516"></a>output_arg {
<a name="line-517"></a>  description: "The float value that the highest quantized value represents."
<a name="line-518"></a>  name: "max_activations"
<a name="line-519"></a>  type: DT_FLOAT
<a name="line-520"></a>}
<a name="line-521"></a>-}</span>
<a name="line-522"></a>
<a name="line-523"></a><a name="quantizedBiasAdd"></a><span class='hs-comment'>-- | Adds Tensor 'bias' to Tensor 'input' for Quantized types.</span>
<a name="line-524"></a><span class='hs-comment'>--</span>
<a name="line-525"></a><span class='hs-comment'>-- Broadcasts the values of bias on dimensions 0..N-2 of 'input'.</span>
<a name="line-526"></a><span class='hs-definition'>quantizedBiasAdd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t1</span> <span class='hs-varid'>t2</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t1</span><span class='hs-layout'>,</span>
<a name="line-527"></a>                                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-528"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-529"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-530"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t1</span><span class='hs-layout'>,</span>
<a name="line-531"></a>                                                               <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t2</span><span class='hs-layout'>,</span>
<a name="line-532"></a>                                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-533"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-534"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-535"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t2</span><span class='hs-layout'>,</span>
<a name="line-536"></a>                                                               <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-537"></a>                                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-538"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-539"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-540"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-541"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t1</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-542"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t2</span> <span class='hs-comment'>-- ^ __bias__: A 1D bias Tensor with size matching the last dimension of 'input'.</span>
<a name="line-543"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_input__: The float value that the lowest quantized input value represents.</span>
<a name="line-544"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_input__: The float value that the highest quantized input value represents.</span>
<a name="line-545"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_bias__: The float value that the lowest quantized bias value represents.</span>
<a name="line-546"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_bias__: The float value that the highest quantized bias value represents.</span>
<a name="line-547"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-548"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-549"></a>                    <span class='hs-comment'>-- ^ (__output__, __min_out__, __max_out__)</span>
<a name="line-550"></a>                    <span class='hs-comment'>--</span>
<a name="line-551"></a>                    <span class='hs-comment'>-- * __output__</span>
<a name="line-552"></a>                    <span class='hs-comment'>--</span>
<a name="line-553"></a>                    <span class='hs-comment'>-- * __min_out__: The float value that the lowest quantized output value represents.</span>
<a name="line-554"></a>                    <span class='hs-comment'>--</span>
<a name="line-555"></a>                    <span class='hs-comment'>-- * __max_out__: The float value that the highest quantized output value represents.</span>
<a name="line-556"></a><span class='hs-definition'>quantizedBiasAdd</span> <span class='hs-varid'>input</span> <span class='hs-varid'>bias</span> <span class='hs-varid'>min_input</span> <span class='hs-varid'>max_input</span> <span class='hs-varid'>min_bias</span>
<a name="line-557"></a>                 <span class='hs-varid'>max_bias</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-558"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedBiasAdd"</span>
<a name="line-559"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T1"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t1</span><span class='hs-layout'>)</span>
<a name="line-560"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T2"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t2</span><span class='hs-layout'>)</span>
<a name="line-561"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-562"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>bias</span> <span class='hs-varid'>min_input</span> <span class='hs-varid'>max_input</span> <span class='hs-varid'>min_bias</span> <span class='hs-varid'>max_bias</span>
<a name="line-563"></a><span class='hs-comment'>{-
<a name="line-564"></a>attr {
<a name="line-565"></a>  allowed_values {
<a name="line-566"></a>    list {
<a name="line-567"></a>      type: DT_QINT8
<a name="line-568"></a>      type: DT_QUINT8
<a name="line-569"></a>      type: DT_QINT16
<a name="line-570"></a>      type: DT_QUINT16
<a name="line-571"></a>      type: DT_QINT32
<a name="line-572"></a>    }
<a name="line-573"></a>  }
<a name="line-574"></a>  name: "T1"
<a name="line-575"></a>  type: "type"
<a name="line-576"></a>}
<a name="line-577"></a>attr {
<a name="line-578"></a>  allowed_values {
<a name="line-579"></a>    list {
<a name="line-580"></a>      type: DT_QINT8
<a name="line-581"></a>      type: DT_QUINT8
<a name="line-582"></a>      type: DT_QINT16
<a name="line-583"></a>      type: DT_QUINT16
<a name="line-584"></a>      type: DT_QINT32
<a name="line-585"></a>    }
<a name="line-586"></a>  }
<a name="line-587"></a>  name: "T2"
<a name="line-588"></a>  type: "type"
<a name="line-589"></a>}
<a name="line-590"></a>attr {
<a name="line-591"></a>  allowed_values {
<a name="line-592"></a>    list {
<a name="line-593"></a>      type: DT_QINT8
<a name="line-594"></a>      type: DT_QUINT8
<a name="line-595"></a>      type: DT_QINT16
<a name="line-596"></a>      type: DT_QUINT16
<a name="line-597"></a>      type: DT_QINT32
<a name="line-598"></a>    }
<a name="line-599"></a>  }
<a name="line-600"></a>  name: "out_type"
<a name="line-601"></a>  type: "type"
<a name="line-602"></a>}
<a name="line-603"></a>input_arg { name: "input" type_attr: "T1" }
<a name="line-604"></a>input_arg {
<a name="line-605"></a>  description: "A 1D bias Tensor with size matching the last dimension of \'input\'."
<a name="line-606"></a>  name: "bias"
<a name="line-607"></a>  type_attr: "T2"
<a name="line-608"></a>}
<a name="line-609"></a>input_arg {
<a name="line-610"></a>  description: "The float value that the lowest quantized input value represents."
<a name="line-611"></a>  name: "min_input"
<a name="line-612"></a>  type: DT_FLOAT
<a name="line-613"></a>}
<a name="line-614"></a>input_arg {
<a name="line-615"></a>  description: "The float value that the highest quantized input value represents."
<a name="line-616"></a>  name: "max_input"
<a name="line-617"></a>  type: DT_FLOAT
<a name="line-618"></a>}
<a name="line-619"></a>input_arg {
<a name="line-620"></a>  description: "The float value that the lowest quantized bias value represents."
<a name="line-621"></a>  name: "min_bias"
<a name="line-622"></a>  type: DT_FLOAT
<a name="line-623"></a>}
<a name="line-624"></a>input_arg {
<a name="line-625"></a>  description: "The float value that the highest quantized bias value represents."
<a name="line-626"></a>  name: "max_bias"
<a name="line-627"></a>  type: DT_FLOAT
<a name="line-628"></a>}
<a name="line-629"></a>output_arg { name: "output" type_attr: "out_type" }
<a name="line-630"></a>output_arg {
<a name="line-631"></a>  description: "The float value that the lowest quantized output value represents."
<a name="line-632"></a>  name: "min_out"
<a name="line-633"></a>  type: DT_FLOAT
<a name="line-634"></a>}
<a name="line-635"></a>output_arg {
<a name="line-636"></a>  description: "The float value that the highest quantized output value represents."
<a name="line-637"></a>  name: "max_out"
<a name="line-638"></a>  type: DT_FLOAT
<a name="line-639"></a>}
<a name="line-640"></a>-}</span>
<a name="line-641"></a>
<a name="line-642"></a><a name="fractionalAvgPoolGrad"></a><span class='hs-comment'>-- | Computes gradient of the FractionalAvgPool function.</span>
<a name="line-643"></a><span class='hs-comment'>--</span>
<a name="line-644"></a><span class='hs-comment'>-- Unlike FractionalMaxPoolGrad, we don't need to find arg_max for</span>
<a name="line-645"></a><span class='hs-comment'>-- FractionalAvgPoolGrad, we just need to evenly back-propagate each element of</span>
<a name="line-646"></a><span class='hs-comment'>-- out_backprop to those indices that form the same pooling cell. Therefore, we</span>
<a name="line-647"></a><span class='hs-comment'>-- just need to know the shape of original input tensor, instead of the whole</span>
<a name="line-648"></a><span class='hs-comment'>-- tensor.</span>
<a name="line-649"></a><span class='hs-definition'>fractionalAvgPoolGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-650"></a>                                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-651"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-652"></a>                                                         <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-653"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __orig_input_tensor_shape__: Original input tensor shape for `fractional_avg_pool`</span>
<a name="line-654"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`.  Gradients</span>
<a name="line-655"></a>                                        <span class='hs-comment'>-- w.r.t. the output of `fractional_avg_pool`.</span>
<a name="line-656"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with</span>
<a name="line-657"></a>                                                     <span class='hs-comment'>-- col_pooling_sequence.</span>
<a name="line-658"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with</span>
<a name="line-659"></a>                                                     <span class='hs-comment'>-- row_pooling sequence.</span>
<a name="line-660"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D.  Gradients w.r.t. the input of `fractional_avg_pool`.</span>
<a name="line-661"></a><span class='hs-definition'>fractionalAvgPoolGrad</span> <span class='hs-varid'>orig_input_tensor_shape</span> <span class='hs-varid'>out_backprop</span> <span class='hs-varid'>row_pooling_sequence</span>
<a name="line-662"></a>                      <span class='hs-varid'>col_pooling_sequence</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-663"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FractionalAvgPoolGrad"</span>
<a name="line-664"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-665"></a>        <span class='hs-varid'>orig_input_tensor_shape</span> <span class='hs-varid'>out_backprop</span> <span class='hs-varid'>row_pooling_sequence</span>
<a name="line-666"></a>        <span class='hs-varid'>col_pooling_sequence</span>
<a name="line-667"></a><span class='hs-comment'>{-
<a name="line-668"></a>attr {
<a name="line-669"></a>  default_value { b: false }
<a name="line-670"></a>  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling."
<a name="line-671"></a>  name: "overlapping"
<a name="line-672"></a>  type: "bool"
<a name="line-673"></a>}
<a name="line-674"></a>attr {
<a name="line-675"></a>  allowed_values {
<a name="line-676"></a>    list {
<a name="line-677"></a>      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
<a name="line-678"></a>    }
<a name="line-679"></a>  }
<a name="line-680"></a>  name: "T"
<a name="line-681"></a>  type: "type"
<a name="line-682"></a>}
<a name="line-683"></a>input_arg {
<a name="line-684"></a>  description: "Original input tensor shape for `fractional_avg_pool`"
<a name="line-685"></a>  name: "orig_input_tensor_shape"
<a name="line-686"></a>  type: DT_INT64
<a name="line-687"></a>}
<a name="line-688"></a>input_arg {
<a name="line-689"></a>  description: "4-D with shape `[batch, height, width, channels]`.  Gradients\nw.r.t. the output of `fractional_avg_pool`."
<a name="line-690"></a>  name: "out_backprop"
<a name="line-691"></a>  type_attr: "T"
<a name="line-692"></a>}
<a name="line-693"></a>input_arg {
<a name="line-694"></a>  description: "row pooling sequence, form pooling region with\ncol_pooling_sequence."
<a name="line-695"></a>  name: "row_pooling_sequence"
<a name="line-696"></a>  type: DT_INT64
<a name="line-697"></a>}
<a name="line-698"></a>input_arg {
<a name="line-699"></a>  description: "column pooling sequence, form pooling region with\nrow_pooling sequence."
<a name="line-700"></a>  name: "col_pooling_sequence"
<a name="line-701"></a>  type: DT_INT64
<a name="line-702"></a>}
<a name="line-703"></a>output_arg {
<a name="line-704"></a>  description: "4-D.  Gradients w.r.t. the input of `fractional_avg_pool`."
<a name="line-705"></a>  name: "output"
<a name="line-706"></a>  type_attr: "T"
<a name="line-707"></a>}
<a name="line-708"></a>-}</span>
<a name="line-709"></a>
<a name="line-710"></a><span class='hs-comment'>-- | Computes gradient of the FractionalMaxPool function.</span>
<a name="line-711"></a>
<a name="line-712"></a><a name="fractionalMaxPoolGrad"></a><span class='hs-definition'>fractionalMaxPoolGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-713"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-714"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-715"></a>                                                            <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-716"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __orig_input__: Original input for `fractional_max_pool`</span>
<a name="line-717"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __orig_output__: Original output for `fractional_max_pool`</span>
<a name="line-718"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`.  Gradients</span>
<a name="line-719"></a>                                        <span class='hs-comment'>-- w.r.t. the output of `fractional_max_pool`.</span>
<a name="line-720"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with</span>
<a name="line-721"></a>                                                     <span class='hs-comment'>-- col_pooling_sequence.</span>
<a name="line-722"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with</span>
<a name="line-723"></a>                                                     <span class='hs-comment'>-- row_pooling sequence.</span>
<a name="line-724"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D.  Gradients w.r.t. the input of `fractional_max_pool`.</span>
<a name="line-725"></a><span class='hs-definition'>fractionalMaxPoolGrad</span> <span class='hs-varid'>orig_input</span> <span class='hs-varid'>orig_output</span> <span class='hs-varid'>out_backprop</span> <span class='hs-varid'>row_pooling_sequence</span>
<a name="line-726"></a>                      <span class='hs-varid'>col_pooling_sequence</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-727"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FractionalMaxPoolGrad"</span>
<a name="line-728"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-729"></a>        <span class='hs-varid'>orig_input</span> <span class='hs-varid'>orig_output</span> <span class='hs-varid'>out_backprop</span> <span class='hs-varid'>row_pooling_sequence</span>
<a name="line-730"></a>        <span class='hs-varid'>col_pooling_sequence</span>
<a name="line-731"></a><span class='hs-comment'>{-
<a name="line-732"></a>attr {
<a name="line-733"></a>  default_value { b: false }
<a name="line-734"></a>  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling."
<a name="line-735"></a>  name: "overlapping"
<a name="line-736"></a>  type: "bool"
<a name="line-737"></a>}
<a name="line-738"></a>attr {
<a name="line-739"></a>  allowed_values {
<a name="line-740"></a>    list {
<a name="line-741"></a>      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
<a name="line-742"></a>    }
<a name="line-743"></a>  }
<a name="line-744"></a>  name: "T"
<a name="line-745"></a>  type: "type"
<a name="line-746"></a>}
<a name="line-747"></a>input_arg {
<a name="line-748"></a>  description: "Original input for `fractional_max_pool`"
<a name="line-749"></a>  name: "orig_input"
<a name="line-750"></a>  type_attr: "T"
<a name="line-751"></a>}
<a name="line-752"></a>input_arg {
<a name="line-753"></a>  description: "Original output for `fractional_max_pool`"
<a name="line-754"></a>  name: "orig_output"
<a name="line-755"></a>  type_attr: "T"
<a name="line-756"></a>}
<a name="line-757"></a>input_arg {
<a name="line-758"></a>  description: "4-D with shape `[batch, height, width, channels]`.  Gradients\nw.r.t. the output of `fractional_max_pool`."
<a name="line-759"></a>  name: "out_backprop"
<a name="line-760"></a>  type_attr: "T"
<a name="line-761"></a>}
<a name="line-762"></a>input_arg {
<a name="line-763"></a>  description: "row pooling sequence, form pooling region with\ncol_pooling_sequence."
<a name="line-764"></a>  name: "row_pooling_sequence"
<a name="line-765"></a>  type: DT_INT64
<a name="line-766"></a>}
<a name="line-767"></a>input_arg {
<a name="line-768"></a>  description: "column pooling sequence, form pooling region with\nrow_pooling sequence."
<a name="line-769"></a>  name: "col_pooling_sequence"
<a name="line-770"></a>  type: DT_INT64
<a name="line-771"></a>}
<a name="line-772"></a>output_arg {
<a name="line-773"></a>  description: "4-D.  Gradients w.r.t. the input of `fractional_max_pool`."
<a name="line-774"></a>  name: "output"
<a name="line-775"></a>  type_attr: "T"
<a name="line-776"></a>}
<a name="line-777"></a>-}</span>
<a name="line-778"></a>
<a name="line-779"></a><a name="fractionalMaxPool"></a><span class='hs-comment'>-- | Performs fractional max pooling on the input.</span>
<a name="line-780"></a><span class='hs-comment'>--</span>
<a name="line-781"></a><span class='hs-comment'>-- Fractional max pooling is slightly different than regular max pooling.  In</span>
<a name="line-782"></a><span class='hs-comment'>-- regular max pooling, you downsize an input set by taking the maximum value of</span>
<a name="line-783"></a><span class='hs-comment'>-- smaller N x N subsections of the set (often 2x2), and try to reduce the set by</span>
<a name="line-784"></a><span class='hs-comment'>-- a factor of N, where N is an integer.  Fractional max pooling, as you might</span>
<a name="line-785"></a><span class='hs-comment'>-- expect from the word "fractional", means that the overall reduction ratio N</span>
<a name="line-786"></a><span class='hs-comment'>-- does not have to be an integer.</span>
<a name="line-787"></a><span class='hs-comment'>-- </span>
<a name="line-788"></a><span class='hs-comment'>-- The sizes of the pooling regions are generated randomly but are fairly uniform.</span>
<a name="line-789"></a><span class='hs-comment'>-- For example, let's look at the height dimension, and the constraints on the</span>
<a name="line-790"></a><span class='hs-comment'>-- list of rows that will be pool boundaries.</span>
<a name="line-791"></a><span class='hs-comment'>-- </span>
<a name="line-792"></a><span class='hs-comment'>-- First we define the following:</span>
<a name="line-793"></a><span class='hs-comment'>-- </span>
<a name="line-794"></a><span class='hs-comment'>-- 1.  input_row_length : the number of rows from the input set</span>
<a name="line-795"></a><span class='hs-comment'>-- 2.  output_row_length : which will be smaller than the input</span>
<a name="line-796"></a><span class='hs-comment'>-- 3.  alpha = input_row_length / output_row_length : our reduction ratio</span>
<a name="line-797"></a><span class='hs-comment'>-- 4.  K = floor(alpha)</span>
<a name="line-798"></a><span class='hs-comment'>-- 5.  row_pooling_sequence : this is the result list of pool boundary rows</span>
<a name="line-799"></a><span class='hs-comment'>-- </span>
<a name="line-800"></a><span class='hs-comment'>-- Then, row_pooling_sequence should satisfy:</span>
<a name="line-801"></a><span class='hs-comment'>-- </span>
<a name="line-802"></a><span class='hs-comment'>-- 1.  a[0] = 0 : the first value of the sequence is 0</span>
<a name="line-803"></a><span class='hs-comment'>-- 2.  a[end] = input_row_length : the last value of the sequence is the size</span>
<a name="line-804"></a><span class='hs-comment'>-- 3.  K &lt;= (a[i+1] - a[i]) &lt;= K+1 : all intervals are K or K+1 size</span>
<a name="line-805"></a><span class='hs-comment'>-- 4.  length(row_pooling_sequence) = output_row_length+1</span>
<a name="line-806"></a><span class='hs-comment'>-- </span>
<a name="line-807"></a><span class='hs-comment'>-- For more details on fractional max pooling, see this paper:</span>
<a name="line-808"></a><span class='hs-comment'>-- [Benjamin Graham, Fractional Max-Pooling](<a href="http://arxiv.org/abs/1412.6071)">http://arxiv.org/abs/1412.6071)</a></span>
<a name="line-809"></a><span class='hs-definition'>fractionalMaxPool</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-810"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-811"></a>                                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-812"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-813"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-814"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span>
<a name="line-815"></a>                     <span class='hs-comment'>-- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__)</span>
<a name="line-816"></a>                     <span class='hs-comment'>--</span>
<a name="line-817"></a>                     <span class='hs-comment'>-- * __output__: output tensor after fractional max pooling.</span>
<a name="line-818"></a>                     <span class='hs-comment'>--</span>
<a name="line-819"></a>                     <span class='hs-comment'>-- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient.</span>
<a name="line-820"></a>                     <span class='hs-comment'>--</span>
<a name="line-821"></a>                     <span class='hs-comment'>-- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient.</span>
<a name="line-822"></a><span class='hs-definition'>fractionalMaxPool</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-823"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FractionalMaxPool"</span>
<a name="line-824"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-825"></a>        <span class='hs-varid'>value</span>
<a name="line-826"></a><span class='hs-comment'>{-
<a name="line-827"></a>attr {
<a name="line-828"></a>  description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be &gt;= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively."
<a name="line-829"></a>  has_minimum: true
<a name="line-830"></a>  minimum: 4
<a name="line-831"></a>  name: "pooling_ratio"
<a name="line-832"></a>  type: "list(float)"
<a name="line-833"></a>}
<a name="line-834"></a>attr {
<a name="line-835"></a>  default_value { b: false }
<a name="line-836"></a>  description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](<a href="http://arxiv.org/abs/1412.6071)">http://arxiv.org/abs/1412.6071)</a> for\ndifference between pseudorandom and random."
<a name="line-837"></a>  name: "pseudo_random"
<a name="line-838"></a>  type: "bool"
<a name="line-839"></a>}
<a name="line-840"></a>attr {
<a name="line-841"></a>  default_value { b: false }
<a name="line-842"></a>  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling."
<a name="line-843"></a>  name: "overlapping"
<a name="line-844"></a>  type: "bool"
<a name="line-845"></a>}
<a name="line-846"></a>attr {
<a name="line-847"></a>  default_value { b: false }
<a name="line-848"></a>  description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalMaxPool node in the computation graph. Mainly used\nin unit test to make FractionalMaxPool deterministic."
<a name="line-849"></a>  name: "deterministic"
<a name="line-850"></a>  type: "bool"
<a name="line-851"></a>}
<a name="line-852"></a>attr {
<a name="line-853"></a>  default_value { i: 0 }
<a name="line-854"></a>  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-855"></a>  name: "seed"
<a name="line-856"></a>  type: "int"
<a name="line-857"></a>}
<a name="line-858"></a>attr {
<a name="line-859"></a>  default_value { i: 0 }
<a name="line-860"></a>  description: "An second seed to avoid seed collision."
<a name="line-861"></a>  name: "seed2"
<a name="line-862"></a>  type: "int"
<a name="line-863"></a>}
<a name="line-864"></a>attr {
<a name="line-865"></a>  allowed_values {
<a name="line-866"></a>    list {
<a name="line-867"></a>      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
<a name="line-868"></a>    }
<a name="line-869"></a>  }
<a name="line-870"></a>  name: "T"
<a name="line-871"></a>  type: "type"
<a name="line-872"></a>}
<a name="line-873"></a>input_arg {
<a name="line-874"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-875"></a>  name: "value"
<a name="line-876"></a>  type_attr: "T"
<a name="line-877"></a>}
<a name="line-878"></a>output_arg {
<a name="line-879"></a>  description: "output tensor after fractional max pooling."
<a name="line-880"></a>  name: "output"
<a name="line-881"></a>  type_attr: "T"
<a name="line-882"></a>}
<a name="line-883"></a>output_arg {
<a name="line-884"></a>  description: "row pooling sequence, needed to calculate gradient."
<a name="line-885"></a>  name: "row_pooling_sequence"
<a name="line-886"></a>  type: DT_INT64
<a name="line-887"></a>}
<a name="line-888"></a>output_arg {
<a name="line-889"></a>  description: "column pooling sequence, needed to calculate gradient."
<a name="line-890"></a>  name: "col_pooling_sequence"
<a name="line-891"></a>  type: DT_INT64
<a name="line-892"></a>}
<a name="line-893"></a>-}</span>
<a name="line-894"></a>
<a name="line-895"></a><a name="topK"></a><span class='hs-comment'>-- | Finds values and indices of the `k` largest elements for the last dimension.</span>
<a name="line-896"></a><span class='hs-comment'>--</span>
<a name="line-897"></a><span class='hs-comment'>-- If the input is a vector (rank-1), finds the `k` largest entries in the vector</span>
<a name="line-898"></a><span class='hs-comment'>-- and outputs their values and indices as vectors.  Thus `values[j]` is the</span>
<a name="line-899"></a><span class='hs-comment'>-- `j`-th largest entry in `input`, and its index is `indices[j]`.</span>
<a name="line-900"></a><span class='hs-comment'>-- </span>
<a name="line-901"></a><span class='hs-comment'>-- For matrices (resp. higher rank input), computes the top `k` entries in each</span>
<a name="line-902"></a><span class='hs-comment'>-- row (resp. vector along the last dimension).  Thus,</span>
<a name="line-903"></a><span class='hs-comment'>-- </span>
<a name="line-904"></a><span class='hs-comment'>--     values.shape = indices.shape = input.shape[:-1] + [k]</span>
<a name="line-905"></a><span class='hs-comment'>-- </span>
<a name="line-906"></a><span class='hs-comment'>-- If two elements are equal, the lower-index element appears first.</span>
<a name="line-907"></a><span class='hs-comment'>-- </span>
<a name="line-908"></a><span class='hs-comment'>-- If `k` varies dynamically, use `TopKV2` below.</span>
<a name="line-909"></a><span class='hs-definition'>topK</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-910"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-911"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-912"></a>                                             <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-913"></a>        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __k__: Number of top elements to look for along the last dimension (along each</span>
<a name="line-914"></a>                       <span class='hs-comment'>-- row for matrices).</span>
<a name="line-915"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 1-D or higher with last dimension at least `k`.</span>
<a name="line-916"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>)</span>
<a name="line-917"></a>        <span class='hs-comment'>-- ^ (__values__, __indices__)</span>
<a name="line-918"></a>        <span class='hs-comment'>--</span>
<a name="line-919"></a>        <span class='hs-comment'>-- * __values__: The `k` largest elements along each last dimensional slice.</span>
<a name="line-920"></a>        <span class='hs-comment'>--</span>
<a name="line-921"></a>        <span class='hs-comment'>-- * __indices__: The indices of `values` within the last dimension of `input`.</span>
<a name="line-922"></a><span class='hs-definition'>topK</span> <span class='hs-varid'>k</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-923"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TopK"</span>
<a name="line-924"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-925"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"k"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>k</span><span class='hs-layout'>)</span>
<a name="line-926"></a>        <span class='hs-varid'>input</span>
<a name="line-927"></a><span class='hs-comment'>{-
<a name="line-928"></a>attr {
<a name="line-929"></a>  description: "Number of top elements to look for along the last dimension (along each\nrow for matrices)."
<a name="line-930"></a>  has_minimum: true
<a name="line-931"></a>  name: "k"
<a name="line-932"></a>  type: "int"
<a name="line-933"></a>}
<a name="line-934"></a>attr {
<a name="line-935"></a>  default_value { b: true }
<a name="line-936"></a>  description: "If true the resulting `k` elements will be sorted by the values in\ndescending order."
<a name="line-937"></a>  name: "sorted"
<a name="line-938"></a>  type: "bool"
<a name="line-939"></a>}
<a name="line-940"></a>attr {
<a name="line-941"></a>  allowed_values {
<a name="line-942"></a>    list {
<a name="line-943"></a>      type: DT_FLOAT
<a name="line-944"></a>      type: DT_DOUBLE
<a name="line-945"></a>      type: DT_INT32
<a name="line-946"></a>      type: DT_INT64
<a name="line-947"></a>      type: DT_UINT8
<a name="line-948"></a>      type: DT_INT16
<a name="line-949"></a>      type: DT_INT8
<a name="line-950"></a>      type: DT_UINT16
<a name="line-951"></a>      type: DT_HALF
<a name="line-952"></a>    }
<a name="line-953"></a>  }
<a name="line-954"></a>  name: "T"
<a name="line-955"></a>  type: "type"
<a name="line-956"></a>}
<a name="line-957"></a>input_arg {
<a name="line-958"></a>  description: "1-D or higher with last dimension at least `k`."
<a name="line-959"></a>  name: "input"
<a name="line-960"></a>  type_attr: "T"
<a name="line-961"></a>}
<a name="line-962"></a>output_arg {
<a name="line-963"></a>  description: "The `k` largest elements along each last dimensional slice."
<a name="line-964"></a>  name: "values"
<a name="line-965"></a>  type_attr: "T"
<a name="line-966"></a>}
<a name="line-967"></a>output_arg {
<a name="line-968"></a>  description: "The indices of `values` within the last dimension of `input`."
<a name="line-969"></a>  name: "indices"
<a name="line-970"></a>  type: DT_INT32
<a name="line-971"></a>}
<a name="line-972"></a>-}</span>
<a name="line-973"></a>
<a name="line-974"></a><a name="inTopK"></a><span class='hs-comment'>-- | Says whether the targets are in the top `K` predictions.</span>
<a name="line-975"></a><span class='hs-comment'>--</span>
<a name="line-976"></a><span class='hs-comment'>-- This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the</span>
<a name="line-977"></a><span class='hs-comment'>-- prediction for the target class is among the top `k` predictions among</span>
<a name="line-978"></a><span class='hs-comment'>-- all predictions for example `i`. Note that the behavior of `InTopK` differs</span>
<a name="line-979"></a><span class='hs-comment'>-- from the `TopK` op in its handling of ties; if multiple classes have the</span>
<a name="line-980"></a><span class='hs-comment'>-- same prediction value and straddle the top-`k` boundary, all of those</span>
<a name="line-981"></a><span class='hs-comment'>-- classes are considered to be in the top `k`.</span>
<a name="line-982"></a><span class='hs-comment'>-- </span>
<a name="line-983"></a><span class='hs-comment'>-- More formally, let</span>
<a name="line-984"></a><span class='hs-comment'>-- </span>
<a name="line-985"></a><span class='hs-comment'>--   \\(predictions_i\\) be the predictions for all classes for example `i`,</span>
<a name="line-986"></a><span class='hs-comment'>--   \\(targets_i\\) be the target class for example `i`,</span>
<a name="line-987"></a><span class='hs-comment'>--   \\(out_i\\) be the output for example `i`,</span>
<a name="line-988"></a><span class='hs-comment'>-- </span>
<a name="line-989"></a><span class='hs-comment'>-- $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$</span>
<a name="line-990"></a><span class='hs-definition'>inTopK</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-991"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-992"></a>          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __k__: Number of top elements to look at for computing precision.</span>
<a name="line-993"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __predictions__: A `batch_size` x `classes` tensor.</span>
<a name="line-994"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __targets__: A `batch_size` vector of class ids.</span>
<a name="line-995"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __precision__: Computed Precision at `k` as a `bool Tensor`.</span>
<a name="line-996"></a><span class='hs-definition'>inTopK</span> <span class='hs-varid'>k</span> <span class='hs-varid'>predictions</span> <span class='hs-varid'>targets</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-997"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"InTopK"</span>
<a name="line-998"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-999"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"k"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>k</span><span class='hs-layout'>)</span>
<a name="line-1000"></a>        <span class='hs-varid'>predictions</span> <span class='hs-varid'>targets</span>
<a name="line-1001"></a><span class='hs-comment'>{-
<a name="line-1002"></a>attr {
<a name="line-1003"></a>  description: "Number of top elements to look at for computing precision."
<a name="line-1004"></a>  name: "k"
<a name="line-1005"></a>  type: "int"
<a name="line-1006"></a>}
<a name="line-1007"></a>attr {
<a name="line-1008"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-1009"></a>  default_value { type: DT_INT32 }
<a name="line-1010"></a>  name: "T"
<a name="line-1011"></a>  type: "type"
<a name="line-1012"></a>}
<a name="line-1013"></a>input_arg {
<a name="line-1014"></a>  description: "A `batch_size` x `classes` tensor."
<a name="line-1015"></a>  name: "predictions"
<a name="line-1016"></a>  type: DT_FLOAT
<a name="line-1017"></a>}
<a name="line-1018"></a>input_arg {
<a name="line-1019"></a>  description: "A `batch_size` vector of class ids."
<a name="line-1020"></a>  name: "targets"
<a name="line-1021"></a>  type_attr: "T"
<a name="line-1022"></a>}
<a name="line-1023"></a>output_arg {
<a name="line-1024"></a>  description: "Computed Precision at `k` as a `bool Tensor`."
<a name="line-1025"></a>  name: "precision"
<a name="line-1026"></a>  type: DT_BOOL
<a name="line-1027"></a>}
<a name="line-1028"></a>-}</span>
<a name="line-1029"></a>
<a name="line-1030"></a><a name="sparseSoftmaxCrossEntropyWithLogits"></a><span class='hs-comment'>-- | Computes softmax cross entropy cost and gradients to backpropagate.</span>
<a name="line-1031"></a><span class='hs-comment'>--</span>
<a name="line-1032"></a><span class='hs-comment'>-- Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept</span>
<a name="line-1033"></a><span class='hs-comment'>-- a matrix of label probabilities, but rather a single label per row</span>
<a name="line-1034"></a><span class='hs-comment'>-- of features.  This label is considered to have probability 1.0 for the</span>
<a name="line-1035"></a><span class='hs-comment'>-- given row.</span>
<a name="line-1036"></a><span class='hs-comment'>-- </span>
<a name="line-1037"></a><span class='hs-comment'>-- Inputs are the logits, not probabilities.</span>
<a name="line-1038"></a><span class='hs-definition'>sparseSoftmaxCrossEntropyWithLogits</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tlabels</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-1039"></a>                                                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1040"></a>                                                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1041"></a>                                                                         <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-1042"></a>                                                                 <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tlabels</span><span class='hs-layout'>,</span>
<a name="line-1043"></a>                                                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1044"></a>                                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tlabels</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1045"></a>                                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__: batch_size x num_classes matrix</span>
<a name="line-1046"></a>                                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tlabels</span> <span class='hs-comment'>-- ^ __labels__: batch_size vector with values in [0, num_classes).</span>
<a name="line-1047"></a>                                                            <span class='hs-comment'>-- This is the label for the given minibatch entry.</span>
<a name="line-1048"></a>                                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-1049"></a>                                       <span class='hs-comment'>-- ^ (__loss__, __backprop__)</span>
<a name="line-1050"></a>                                       <span class='hs-comment'>--</span>
<a name="line-1051"></a>                                       <span class='hs-comment'>-- * __loss__: Per example loss (batch_size vector).</span>
<a name="line-1052"></a>                                       <span class='hs-comment'>--</span>
<a name="line-1053"></a>                                       <span class='hs-comment'>-- * __backprop__: backpropagated gradients (batch_size x num_classes matrix).</span>
<a name="line-1054"></a><span class='hs-definition'>sparseSoftmaxCrossEntropyWithLogits</span> <span class='hs-varid'>features</span> <span class='hs-varid'>labels</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1055"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseSoftmaxCrossEntropyWithLogits"</span>
<a name="line-1056"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-1057"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tlabels"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tlabels</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1058"></a>        <span class='hs-varid'>features</span> <span class='hs-varid'>labels</span>
<a name="line-1059"></a><span class='hs-comment'>{-
<a name="line-1060"></a>attr {
<a name="line-1061"></a>  allowed_values {
<a name="line-1062"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-1063"></a>  }
<a name="line-1064"></a>  name: "T"
<a name="line-1065"></a>  type: "type"
<a name="line-1066"></a>}
<a name="line-1067"></a>attr {
<a name="line-1068"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-1069"></a>  default_value { type: DT_INT64 }
<a name="line-1070"></a>  name: "Tlabels"
<a name="line-1071"></a>  type: "type"
<a name="line-1072"></a>}
<a name="line-1073"></a>input_arg {
<a name="line-1074"></a>  description: "batch_size x num_classes matrix"
<a name="line-1075"></a>  name: "features"
<a name="line-1076"></a>  type_attr: "T"
<a name="line-1077"></a>}
<a name="line-1078"></a>input_arg {
<a name="line-1079"></a>  description: "batch_size vector with values in [0, num_classes).\nThis is the label for the given minibatch entry."
<a name="line-1080"></a>  name: "labels"
<a name="line-1081"></a>  type_attr: "Tlabels"
<a name="line-1082"></a>}
<a name="line-1083"></a>output_arg {
<a name="line-1084"></a>  description: "Per example loss (batch_size vector)."
<a name="line-1085"></a>  name: "loss"
<a name="line-1086"></a>  type_attr: "T"
<a name="line-1087"></a>}
<a name="line-1088"></a>output_arg {
<a name="line-1089"></a>  description: "backpropagated gradients (batch_size x num_classes matrix)."
<a name="line-1090"></a>  name: "backprop"
<a name="line-1091"></a>  type_attr: "T"
<a name="line-1092"></a>}
<a name="line-1093"></a>-}</span>
<a name="line-1094"></a>
<a name="line-1095"></a><a name="softmaxCrossEntropyWithLogits"></a><span class='hs-comment'>-- | Computes softmax cross entropy cost and gradients to backpropagate.</span>
<a name="line-1096"></a><span class='hs-comment'>--</span>
<a name="line-1097"></a><span class='hs-comment'>-- Inputs are the logits, not probabilities.</span>
<a name="line-1098"></a><span class='hs-definition'>softmaxCrossEntropyWithLogits</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-1099"></a>                                                   <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1100"></a>                                                           <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1101"></a>                                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__: batch_size x num_classes matrix</span>
<a name="line-1102"></a>                                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __labels__: batch_size x num_classes matrix</span>
<a name="line-1103"></a>                                                <span class='hs-comment'>-- The caller must ensure that each batch of labels represents a valid</span>
<a name="line-1104"></a>                                                <span class='hs-comment'>-- probability distribution.</span>
<a name="line-1105"></a>                                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-1106"></a>                                 <span class='hs-comment'>-- ^ (__loss__, __backprop__)</span>
<a name="line-1107"></a>                                 <span class='hs-comment'>--</span>
<a name="line-1108"></a>                                 <span class='hs-comment'>-- * __loss__: Per example loss (batch_size vector).</span>
<a name="line-1109"></a>                                 <span class='hs-comment'>--</span>
<a name="line-1110"></a>                                 <span class='hs-comment'>-- * __backprop__: backpropagated gradients (batch_size x num_classes matrix).</span>
<a name="line-1111"></a><span class='hs-definition'>softmaxCrossEntropyWithLogits</span> <span class='hs-varid'>features</span> <span class='hs-varid'>labels</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1112"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SoftmaxCrossEntropyWithLogits"</span>
<a name="line-1113"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1114"></a>        <span class='hs-varid'>features</span> <span class='hs-varid'>labels</span>
<a name="line-1115"></a><span class='hs-comment'>{-
<a name="line-1116"></a>attr {
<a name="line-1117"></a>  allowed_values {
<a name="line-1118"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-1119"></a>  }
<a name="line-1120"></a>  name: "T"
<a name="line-1121"></a>  type: "type"
<a name="line-1122"></a>}
<a name="line-1123"></a>input_arg {
<a name="line-1124"></a>  description: "batch_size x num_classes matrix"
<a name="line-1125"></a>  name: "features"
<a name="line-1126"></a>  type_attr: "T"
<a name="line-1127"></a>}
<a name="line-1128"></a>input_arg {
<a name="line-1129"></a>  description: "batch_size x num_classes matrix\nThe caller must ensure that each batch of labels represents a valid\nprobability distribution."
<a name="line-1130"></a>  name: "labels"
<a name="line-1131"></a>  type_attr: "T"
<a name="line-1132"></a>}
<a name="line-1133"></a>output_arg {
<a name="line-1134"></a>  description: "Per example loss (batch_size vector)."
<a name="line-1135"></a>  name: "loss"
<a name="line-1136"></a>  type_attr: "T"
<a name="line-1137"></a>}
<a name="line-1138"></a>output_arg {
<a name="line-1139"></a>  description: "backpropagated gradients (batch_size x num_classes matrix)."
<a name="line-1140"></a>  name: "backprop"
<a name="line-1141"></a>  type_attr: "T"
<a name="line-1142"></a>}
<a name="line-1143"></a>-}</span>
<a name="line-1144"></a>
<a name="line-1145"></a><a name="logSoftmax"></a><span class='hs-comment'>-- | Computes log softmax activations.</span>
<a name="line-1146"></a><span class='hs-comment'>--</span>
<a name="line-1147"></a><span class='hs-comment'>-- For each batch `i` and class `j` we have</span>
<a name="line-1148"></a><span class='hs-comment'>-- </span>
<a name="line-1149"></a><span class='hs-comment'>--     logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))</span>
<a name="line-1150"></a><span class='hs-definition'>logSoftmax</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1151"></a>                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1152"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __logits__: 2-D with shape `[batch_size, num_classes]`.</span>
<a name="line-1153"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __logsoftmax__: Same shape as `logits`.</span>
<a name="line-1154"></a><span class='hs-definition'>logSoftmax</span> <span class='hs-varid'>logits</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1155"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LogSoftmax"</span>
<a name="line-1156"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1157"></a>        <span class='hs-varid'>logits</span>
<a name="line-1158"></a><span class='hs-comment'>{-
<a name="line-1159"></a>attr {
<a name="line-1160"></a>  allowed_values {
<a name="line-1161"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-1162"></a>  }
<a name="line-1163"></a>  name: "T"
<a name="line-1164"></a>  type: "type"
<a name="line-1165"></a>}
<a name="line-1166"></a>input_arg {
<a name="line-1167"></a>  description: "2-D with shape `[batch_size, num_classes]`."
<a name="line-1168"></a>  name: "logits"
<a name="line-1169"></a>  type_attr: "T"
<a name="line-1170"></a>}
<a name="line-1171"></a>output_arg {
<a name="line-1172"></a>  description: "Same shape as `logits`."
<a name="line-1173"></a>  name: "logsoftmax"
<a name="line-1174"></a>  type_attr: "T"
<a name="line-1175"></a>}
<a name="line-1176"></a>-}</span>
<a name="line-1177"></a>
<a name="line-1178"></a><span class='hs-comment'>-- | Computes softsign gradients for a softsign operation.</span>
<a name="line-1179"></a>
<a name="line-1180"></a><a name="softsignGrad"></a><span class='hs-definition'>softsignGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-1181"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1182"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-1183"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1184"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1185"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1186"></a>                                                        <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1187"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gradients__: The backpropagated gradients to the corresponding softsign operation.</span>
<a name="line-1188"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__: The features passed as input to the corresponding softsign operation.</span>
<a name="line-1189"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __backprops__: The gradients: `gradients / (1 + abs(-features)) ** 2`.</span>
<a name="line-1190"></a><span class='hs-definition'>softsignGrad</span> <span class='hs-varid'>gradients</span> <span class='hs-varid'>features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1191"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SoftsignGrad"</span>
<a name="line-1192"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1193"></a>        <span class='hs-varid'>gradients</span> <span class='hs-varid'>features</span>
<a name="line-1194"></a><span class='hs-comment'>{-
<a name="line-1195"></a>attr {
<a name="line-1196"></a>  allowed_values {
<a name="line-1197"></a>    list {
<a name="line-1198"></a>      type: DT_FLOAT
<a name="line-1199"></a>      type: DT_DOUBLE
<a name="line-1200"></a>      type: DT_INT32
<a name="line-1201"></a>      type: DT_INT64
<a name="line-1202"></a>      type: DT_UINT8
<a name="line-1203"></a>      type: DT_INT16
<a name="line-1204"></a>      type: DT_INT8
<a name="line-1205"></a>      type: DT_UINT16
<a name="line-1206"></a>      type: DT_HALF
<a name="line-1207"></a>    }
<a name="line-1208"></a>  }
<a name="line-1209"></a>  name: "T"
<a name="line-1210"></a>  type: "type"
<a name="line-1211"></a>}
<a name="line-1212"></a>input_arg {
<a name="line-1213"></a>  description: "The backpropagated gradients to the corresponding softsign operation."
<a name="line-1214"></a>  name: "gradients"
<a name="line-1215"></a>  type_attr: "T"
<a name="line-1216"></a>}
<a name="line-1217"></a>input_arg {
<a name="line-1218"></a>  description: "The features passed as input to the corresponding softsign operation."
<a name="line-1219"></a>  name: "features"
<a name="line-1220"></a>  type_attr: "T"
<a name="line-1221"></a>}
<a name="line-1222"></a>output_arg {
<a name="line-1223"></a>  description: "The gradients: `gradients / (1 + abs(-features)) ** 2`."
<a name="line-1224"></a>  name: "backprops"
<a name="line-1225"></a>  type_attr: "T"
<a name="line-1226"></a>}
<a name="line-1227"></a>-}</span>
<a name="line-1228"></a>
<a name="line-1229"></a><span class='hs-comment'>-- | Computes softplus: `log(exp(features) + 1)`.</span>
<a name="line-1230"></a>
<a name="line-1231"></a><a name="softplus"></a><span class='hs-definition'>softplus</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1232"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1233"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1234"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1235"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1236"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__</span>
<a name="line-1237"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __activations__</span>
<a name="line-1238"></a><span class='hs-definition'>softplus</span> <span class='hs-varid'>features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1239"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Softplus"</span>
<a name="line-1240"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1241"></a>        <span class='hs-varid'>features</span>
<a name="line-1242"></a><span class='hs-comment'>{-
<a name="line-1243"></a>attr {
<a name="line-1244"></a>  allowed_values {
<a name="line-1245"></a>    list {
<a name="line-1246"></a>      type: DT_FLOAT
<a name="line-1247"></a>      type: DT_DOUBLE
<a name="line-1248"></a>      type: DT_INT32
<a name="line-1249"></a>      type: DT_INT64
<a name="line-1250"></a>      type: DT_UINT8
<a name="line-1251"></a>      type: DT_INT16
<a name="line-1252"></a>      type: DT_INT8
<a name="line-1253"></a>      type: DT_UINT16
<a name="line-1254"></a>      type: DT_HALF
<a name="line-1255"></a>    }
<a name="line-1256"></a>  }
<a name="line-1257"></a>  name: "T"
<a name="line-1258"></a>  type: "type"
<a name="line-1259"></a>}
<a name="line-1260"></a>input_arg { name: "features" type_attr: "T" }
<a name="line-1261"></a>output_arg { name: "activations" type_attr: "T" }
<a name="line-1262"></a>-}</span>
<a name="line-1263"></a>
<a name="line-1264"></a><span class='hs-comment'>-- | Computes gradients for the exponential linear (Elu) operation.</span>
<a name="line-1265"></a>
<a name="line-1266"></a><a name="eluGrad"></a><span class='hs-definition'>eluGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-1267"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1268"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-1269"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1270"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1271"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1272"></a>                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1273"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gradients__: The backpropagated gradients to the corresponding Elu operation.</span>
<a name="line-1274"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __outputs__: The outputs of the corresponding Elu operation.</span>
<a name="line-1275"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __backprops__: The gradients: `gradients * (outputs + 1)` if outputs &lt; 0,</span>
<a name="line-1276"></a>           <span class='hs-comment'>-- `gradients` otherwise.</span>
<a name="line-1277"></a><span class='hs-definition'>eluGrad</span> <span class='hs-varid'>gradients</span> <span class='hs-varid'>outputs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1278"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"EluGrad"</span>
<a name="line-1279"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1280"></a>        <span class='hs-varid'>gradients</span> <span class='hs-varid'>outputs</span>
<a name="line-1281"></a><span class='hs-comment'>{-
<a name="line-1282"></a>attr {
<a name="line-1283"></a>  allowed_values {
<a name="line-1284"></a>    list {
<a name="line-1285"></a>      type: DT_FLOAT
<a name="line-1286"></a>      type: DT_DOUBLE
<a name="line-1287"></a>      type: DT_INT32
<a name="line-1288"></a>      type: DT_INT64
<a name="line-1289"></a>      type: DT_UINT8
<a name="line-1290"></a>      type: DT_INT16
<a name="line-1291"></a>      type: DT_INT8
<a name="line-1292"></a>      type: DT_UINT16
<a name="line-1293"></a>      type: DT_HALF
<a name="line-1294"></a>    }
<a name="line-1295"></a>  }
<a name="line-1296"></a>  name: "T"
<a name="line-1297"></a>  type: "type"
<a name="line-1298"></a>}
<a name="line-1299"></a>input_arg {
<a name="line-1300"></a>  description: "The backpropagated gradients to the corresponding Elu operation."
<a name="line-1301"></a>  name: "gradients"
<a name="line-1302"></a>  type_attr: "T"
<a name="line-1303"></a>}
<a name="line-1304"></a>input_arg {
<a name="line-1305"></a>  description: "The outputs of the corresponding Elu operation."
<a name="line-1306"></a>  name: "outputs"
<a name="line-1307"></a>  type_attr: "T"
<a name="line-1308"></a>}
<a name="line-1309"></a>output_arg {
<a name="line-1310"></a>  description: "The gradients: `gradients * (outputs + 1)` if outputs &lt; 0,\n`gradients` otherwise."
<a name="line-1311"></a>  name: "backprops"
<a name="line-1312"></a>  type_attr: "T"
<a name="line-1313"></a>}
<a name="line-1314"></a>-}</span>
<a name="line-1315"></a>
<a name="line-1316"></a><a name="elu"></a><span class='hs-comment'>-- | Computes exponential linear: `exp(features) - 1` if &lt; 0, `features` otherwise.</span>
<a name="line-1317"></a><span class='hs-comment'>--</span>
<a name="line-1318"></a><span class='hs-comment'>-- See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)</span>
<a name="line-1319"></a><span class='hs-comment'>-- ](<a href="http://arxiv.org/abs/1511.07289)">http://arxiv.org/abs/1511.07289)</a></span>
<a name="line-1320"></a><span class='hs-definition'>elu</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1321"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1322"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-1323"></a>                                            <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1324"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__</span>
<a name="line-1325"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __activations__</span>
<a name="line-1326"></a><span class='hs-definition'>elu</span> <span class='hs-varid'>features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1327"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Elu"</span>
<a name="line-1328"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1329"></a>        <span class='hs-varid'>features</span>
<a name="line-1330"></a><span class='hs-comment'>{-
<a name="line-1331"></a>attr {
<a name="line-1332"></a>  allowed_values {
<a name="line-1333"></a>    list {
<a name="line-1334"></a>      type: DT_FLOAT
<a name="line-1335"></a>      type: DT_DOUBLE
<a name="line-1336"></a>      type: DT_INT32
<a name="line-1337"></a>      type: DT_INT64
<a name="line-1338"></a>      type: DT_UINT8
<a name="line-1339"></a>      type: DT_INT16
<a name="line-1340"></a>      type: DT_INT8
<a name="line-1341"></a>      type: DT_UINT16
<a name="line-1342"></a>      type: DT_HALF
<a name="line-1343"></a>    }
<a name="line-1344"></a>  }
<a name="line-1345"></a>  name: "T"
<a name="line-1346"></a>  type: "type"
<a name="line-1347"></a>}
<a name="line-1348"></a>input_arg { name: "features" type_attr: "T" }
<a name="line-1349"></a>output_arg { name: "activations" type_attr: "T" }
<a name="line-1350"></a>-}</span>
<a name="line-1351"></a>
<a name="line-1352"></a><span class='hs-comment'>-- | Computes rectified linear 6: `min(max(features, 0), 6)`.</span>
<a name="line-1353"></a>
<a name="line-1354"></a><a name="relu6"></a><span class='hs-definition'>relu6</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1355"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1356"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-1357"></a>                                              <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1358"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__</span>
<a name="line-1359"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __activations__</span>
<a name="line-1360"></a><span class='hs-definition'>relu6</span> <span class='hs-varid'>features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1361"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Relu6"</span>
<a name="line-1362"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1363"></a>        <span class='hs-varid'>features</span>
<a name="line-1364"></a><span class='hs-comment'>{-
<a name="line-1365"></a>attr {
<a name="line-1366"></a>  allowed_values {
<a name="line-1367"></a>    list {
<a name="line-1368"></a>      type: DT_FLOAT
<a name="line-1369"></a>      type: DT_DOUBLE
<a name="line-1370"></a>      type: DT_INT32
<a name="line-1371"></a>      type: DT_INT64
<a name="line-1372"></a>      type: DT_UINT8
<a name="line-1373"></a>      type: DT_INT16
<a name="line-1374"></a>      type: DT_INT8
<a name="line-1375"></a>      type: DT_UINT16
<a name="line-1376"></a>      type: DT_HALF
<a name="line-1377"></a>    }
<a name="line-1378"></a>  }
<a name="line-1379"></a>  name: "T"
<a name="line-1380"></a>  type: "type"
<a name="line-1381"></a>}
<a name="line-1382"></a>input_arg { name: "features" type_attr: "T" }
<a name="line-1383"></a>output_arg { name: "activations" type_attr: "T" }
<a name="line-1384"></a>-}</span>
<a name="line-1385"></a>
<a name="line-1386"></a><span class='hs-comment'>-- | Computes rectified linear gradients for a Relu operation.</span>
<a name="line-1387"></a>
<a name="line-1388"></a><a name="reluGrad"></a><span class='hs-definition'>reluGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-1389"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1390"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-1391"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1392"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1393"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1394"></a>                                                    <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1395"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gradients__: The backpropagated gradients to the corresponding Relu operation.</span>
<a name="line-1396"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__: The features passed as input to the corresponding Relu operation, OR</span>
<a name="line-1397"></a>                           <span class='hs-comment'>-- the outputs of that operation (both work equivalently).</span>
<a name="line-1398"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __backprops__: `gradients * (features &gt; 0)`.</span>
<a name="line-1399"></a><span class='hs-definition'>reluGrad</span> <span class='hs-varid'>gradients</span> <span class='hs-varid'>features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1400"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReluGrad"</span>
<a name="line-1401"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1402"></a>        <span class='hs-varid'>gradients</span> <span class='hs-varid'>features</span>
<a name="line-1403"></a><span class='hs-comment'>{-
<a name="line-1404"></a>attr {
<a name="line-1405"></a>  allowed_values {
<a name="line-1406"></a>    list {
<a name="line-1407"></a>      type: DT_FLOAT
<a name="line-1408"></a>      type: DT_DOUBLE
<a name="line-1409"></a>      type: DT_INT32
<a name="line-1410"></a>      type: DT_INT64
<a name="line-1411"></a>      type: DT_UINT8
<a name="line-1412"></a>      type: DT_INT16
<a name="line-1413"></a>      type: DT_INT8
<a name="line-1414"></a>      type: DT_UINT16
<a name="line-1415"></a>      type: DT_HALF
<a name="line-1416"></a>    }
<a name="line-1417"></a>  }
<a name="line-1418"></a>  name: "T"
<a name="line-1419"></a>  type: "type"
<a name="line-1420"></a>}
<a name="line-1421"></a>input_arg {
<a name="line-1422"></a>  description: "The backpropagated gradients to the corresponding Relu operation."
<a name="line-1423"></a>  name: "gradients"
<a name="line-1424"></a>  type_attr: "T"
<a name="line-1425"></a>}
<a name="line-1426"></a>input_arg {
<a name="line-1427"></a>  description: "The features passed as input to the corresponding Relu operation, OR\nthe outputs of that operation (both work equivalently)."
<a name="line-1428"></a>  name: "features"
<a name="line-1429"></a>  type_attr: "T"
<a name="line-1430"></a>}
<a name="line-1431"></a>output_arg {
<a name="line-1432"></a>  description: "`gradients * (features &gt; 0)`."
<a name="line-1433"></a>  name: "backprops"
<a name="line-1434"></a>  type_attr: "T"
<a name="line-1435"></a>}
<a name="line-1436"></a>-}</span>
<a name="line-1437"></a>
<a name="line-1438"></a><span class='hs-comment'>-- | Computes the gradient of morphological 2-D dilation with respect to the input.</span>
<a name="line-1439"></a>
<a name="line-1440"></a><a name="dilation2DBackpropInput"></a><span class='hs-definition'>dilation2DBackpropInput</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-1441"></a>                                                <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-1442"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1443"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-1444"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1445"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1446"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1447"></a>                                                        <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1448"></a>                           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.</span>
<a name="line-1449"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.</span>
<a name="line-1450"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`.</span>
<a name="line-1451"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __in_backprop__: 4-D with shape `[batch, in_height, in_width, depth]`.</span>
<a name="line-1452"></a><span class='hs-definition'>dilation2DBackpropInput</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1453"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Dilation2DBackpropInput"</span>
<a name="line-1454"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1455"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span>
<a name="line-1456"></a><span class='hs-comment'>{-
<a name="line-1457"></a>attr {
<a name="line-1458"></a>  allowed_values {
<a name="line-1459"></a>    list {
<a name="line-1460"></a>      type: DT_FLOAT
<a name="line-1461"></a>      type: DT_DOUBLE
<a name="line-1462"></a>      type: DT_INT32
<a name="line-1463"></a>      type: DT_INT64
<a name="line-1464"></a>      type: DT_UINT8
<a name="line-1465"></a>      type: DT_INT16
<a name="line-1466"></a>      type: DT_INT8
<a name="line-1467"></a>      type: DT_UINT16
<a name="line-1468"></a>      type: DT_HALF
<a name="line-1469"></a>    }
<a name="line-1470"></a>  }
<a name="line-1471"></a>  name: "T"
<a name="line-1472"></a>  type: "type"
<a name="line-1473"></a>}
<a name="line-1474"></a>attr {
<a name="line-1475"></a>  description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`."
<a name="line-1476"></a>  has_minimum: true
<a name="line-1477"></a>  minimum: 4
<a name="line-1478"></a>  name: "strides"
<a name="line-1479"></a>  type: "list(int)"
<a name="line-1480"></a>}
<a name="line-1481"></a>attr {
<a name="line-1482"></a>  description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`."
<a name="line-1483"></a>  has_minimum: true
<a name="line-1484"></a>  minimum: 4
<a name="line-1485"></a>  name: "rates"
<a name="line-1486"></a>  type: "list(int)"
<a name="line-1487"></a>}
<a name="line-1488"></a>attr {
<a name="line-1489"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-1490"></a>  description: "The type of padding algorithm to use."
<a name="line-1491"></a>  name: "padding"
<a name="line-1492"></a>  type: "string"
<a name="line-1493"></a>}
<a name="line-1494"></a>input_arg {
<a name="line-1495"></a>  description: "4-D with shape `[batch, in_height, in_width, depth]`."
<a name="line-1496"></a>  name: "input"
<a name="line-1497"></a>  type_attr: "T"
<a name="line-1498"></a>}
<a name="line-1499"></a>input_arg {
<a name="line-1500"></a>  description: "3-D with shape `[filter_height, filter_width, depth]`."
<a name="line-1501"></a>  name: "filter"
<a name="line-1502"></a>  type_attr: "T"
<a name="line-1503"></a>}
<a name="line-1504"></a>input_arg {
<a name="line-1505"></a>  description: "4-D with shape `[batch, out_height, out_width, depth]`."
<a name="line-1506"></a>  name: "out_backprop"
<a name="line-1507"></a>  type_attr: "T"
<a name="line-1508"></a>}
<a name="line-1509"></a>output_arg {
<a name="line-1510"></a>  description: "4-D with shape `[batch, in_height, in_width, depth]`."
<a name="line-1511"></a>  name: "in_backprop"
<a name="line-1512"></a>  type_attr: "T"
<a name="line-1513"></a>}
<a name="line-1514"></a>-}</span>
<a name="line-1515"></a>
<a name="line-1516"></a><span class='hs-comment'>-- | Computes gradients of the maxpooling function.</span>
<a name="line-1517"></a>
<a name="line-1518"></a><a name="maxPoolGrad"></a><span class='hs-definition'>maxPoolGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1519"></a>                                                          <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1520"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __orig_input__: The original input tensor.</span>
<a name="line-1521"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __orig_output__: The original output tensor.</span>
<a name="line-1522"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: 4-D.  Gradients w.r.t. the output of `max_pool`.</span>
<a name="line-1523"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Gradients w.r.t. the input to `max_pool`.</span>
<a name="line-1524"></a><span class='hs-definition'>maxPoolGrad</span> <span class='hs-varid'>orig_input</span> <span class='hs-varid'>orig_output</span> <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1525"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MaxPoolGrad"</span>
<a name="line-1526"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1527"></a>        <span class='hs-varid'>orig_input</span> <span class='hs-varid'>orig_output</span> <span class='hs-varid'>grad</span>
<a name="line-1528"></a><span class='hs-comment'>{-
<a name="line-1529"></a>attr {
<a name="line-1530"></a>  description: "The size of the window for each dimension of the input tensor."
<a name="line-1531"></a>  has_minimum: true
<a name="line-1532"></a>  minimum: 4
<a name="line-1533"></a>  name: "ksize"
<a name="line-1534"></a>  type: "list(int)"
<a name="line-1535"></a>}
<a name="line-1536"></a>attr {
<a name="line-1537"></a>  description: "The stride of the sliding window for each dimension of the\ninput tensor."
<a name="line-1538"></a>  has_minimum: true
<a name="line-1539"></a>  minimum: 4
<a name="line-1540"></a>  name: "strides"
<a name="line-1541"></a>  type: "list(int)"
<a name="line-1542"></a>}
<a name="line-1543"></a>attr {
<a name="line-1544"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-1545"></a>  description: "The type of padding algorithm to use."
<a name="line-1546"></a>  name: "padding"
<a name="line-1547"></a>  type: "string"
<a name="line-1548"></a>}
<a name="line-1549"></a>attr {
<a name="line-1550"></a>  allowed_values { list { s: "NHWC" s: "NCHW" } }
<a name="line-1551"></a>  default_value { s: "NHWC" }
<a name="line-1552"></a>  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
<a name="line-1553"></a>  name: "data_format"
<a name="line-1554"></a>  type: "string"
<a name="line-1555"></a>}
<a name="line-1556"></a>attr {
<a name="line-1557"></a>  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
<a name="line-1558"></a>  default_value { type: DT_FLOAT }
<a name="line-1559"></a>  name: "T"
<a name="line-1560"></a>  type: "type"
<a name="line-1561"></a>}
<a name="line-1562"></a>input_arg {
<a name="line-1563"></a>  description: "The original input tensor."
<a name="line-1564"></a>  name: "orig_input"
<a name="line-1565"></a>  type_attr: "T"
<a name="line-1566"></a>}
<a name="line-1567"></a>input_arg {
<a name="line-1568"></a>  description: "The original output tensor."
<a name="line-1569"></a>  name: "orig_output"
<a name="line-1570"></a>  type_attr: "T"
<a name="line-1571"></a>}
<a name="line-1572"></a>input_arg {
<a name="line-1573"></a>  description: "4-D.  Gradients w.r.t. the output of `max_pool`."
<a name="line-1574"></a>  name: "grad"
<a name="line-1575"></a>  type_attr: "T"
<a name="line-1576"></a>}
<a name="line-1577"></a>output_arg {
<a name="line-1578"></a>  description: "Gradients w.r.t. the input to `max_pool`."
<a name="line-1579"></a>  name: "output"
<a name="line-1580"></a>  type_attr: "T"
<a name="line-1581"></a>}
<a name="line-1582"></a>-}</span>
<a name="line-1583"></a>
<a name="line-1584"></a><span class='hs-comment'>-- | Gradients for Local Response Normalization.</span>
<a name="line-1585"></a>
<a name="line-1586"></a><a name="lRNGrad"></a><span class='hs-definition'>lRNGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1587"></a>                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1588"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input_grads__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-1589"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input_image__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-1590"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output_image__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-1591"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The gradients for LRN.</span>
<a name="line-1592"></a><span class='hs-definition'>lRNGrad</span> <span class='hs-varid'>input_grads</span> <span class='hs-varid'>input_image</span> <span class='hs-varid'>output_image</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1593"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LRNGrad"</span>
<a name="line-1594"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1595"></a>        <span class='hs-varid'>input_grads</span> <span class='hs-varid'>input_image</span> <span class='hs-varid'>output_image</span>
<a name="line-1596"></a><span class='hs-comment'>{-
<a name="line-1597"></a>attr {
<a name="line-1598"></a>  default_value { i: 5 }
<a name="line-1599"></a>  description: "A depth radius."
<a name="line-1600"></a>  name: "depth_radius"
<a name="line-1601"></a>  type: "int"
<a name="line-1602"></a>}
<a name="line-1603"></a>attr {
<a name="line-1604"></a>  default_value { f: 1.0 }
<a name="line-1605"></a>  description: "An offset (usually &gt; 0 to avoid dividing by 0)."
<a name="line-1606"></a>  name: "bias"
<a name="line-1607"></a>  type: "float"
<a name="line-1608"></a>}
<a name="line-1609"></a>attr {
<a name="line-1610"></a>  default_value { f: 1.0 }
<a name="line-1611"></a>  description: "A scale factor, usually positive."
<a name="line-1612"></a>  name: "alpha"
<a name="line-1613"></a>  type: "float"
<a name="line-1614"></a>}
<a name="line-1615"></a>attr {
<a name="line-1616"></a>  default_value { f: 0.5 }
<a name="line-1617"></a>  description: "An exponent."
<a name="line-1618"></a>  name: "beta"
<a name="line-1619"></a>  type: "float"
<a name="line-1620"></a>}
<a name="line-1621"></a>attr {
<a name="line-1622"></a>  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
<a name="line-1623"></a>  default_value { type: DT_FLOAT }
<a name="line-1624"></a>  name: "T"
<a name="line-1625"></a>  type: "type"
<a name="line-1626"></a>}
<a name="line-1627"></a>input_arg {
<a name="line-1628"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-1629"></a>  name: "input_grads"
<a name="line-1630"></a>  type_attr: "T"
<a name="line-1631"></a>}
<a name="line-1632"></a>input_arg {
<a name="line-1633"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-1634"></a>  name: "input_image"
<a name="line-1635"></a>  type_attr: "T"
<a name="line-1636"></a>}
<a name="line-1637"></a>input_arg {
<a name="line-1638"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-1639"></a>  name: "output_image"
<a name="line-1640"></a>  type_attr: "T"
<a name="line-1641"></a>}
<a name="line-1642"></a>output_arg {
<a name="line-1643"></a>  description: "The gradients for LRN." name: "output" type_attr: "T"
<a name="line-1644"></a>}
<a name="line-1645"></a>-}</span>
<a name="line-1646"></a>
<a name="line-1647"></a><span class='hs-comment'>-- | Computes gradients of max pooling function.</span>
<a name="line-1648"></a>
<a name="line-1649"></a><a name="maxPool3DGrad"></a><span class='hs-definition'>maxPool3DGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-1650"></a>                                      <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-1651"></a>                                              <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-1652"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1653"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1654"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-1655"></a>                                              <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1656"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __orig_input__: The original input tensor.</span>
<a name="line-1657"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __orig_output__: The original output tensor.</span>
<a name="line-1658"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.</span>
<a name="line-1659"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-1660"></a><span class='hs-definition'>maxPool3DGrad</span> <span class='hs-varid'>orig_input</span> <span class='hs-varid'>orig_output</span> <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1661"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MaxPool3DGrad"</span>
<a name="line-1662"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1663"></a>        <span class='hs-varid'>orig_input</span> <span class='hs-varid'>orig_output</span> <span class='hs-varid'>grad</span>
<a name="line-1664"></a><span class='hs-comment'>{-
<a name="line-1665"></a>attr {
<a name="line-1666"></a>  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
<a name="line-1667"></a>  has_minimum: true
<a name="line-1668"></a>  minimum: 5
<a name="line-1669"></a>  name: "ksize"
<a name="line-1670"></a>  type: "list(int)"
<a name="line-1671"></a>}
<a name="line-1672"></a>attr {
<a name="line-1673"></a>  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
<a name="line-1674"></a>  has_minimum: true
<a name="line-1675"></a>  minimum: 5
<a name="line-1676"></a>  name: "strides"
<a name="line-1677"></a>  type: "list(int)"
<a name="line-1678"></a>}
<a name="line-1679"></a>attr {
<a name="line-1680"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-1681"></a>  description: "The type of padding algorithm to use."
<a name="line-1682"></a>  name: "padding"
<a name="line-1683"></a>  type: "string"
<a name="line-1684"></a>}
<a name="line-1685"></a>attr {
<a name="line-1686"></a>  allowed_values {
<a name="line-1687"></a>    list {
<a name="line-1688"></a>      type: DT_FLOAT
<a name="line-1689"></a>      type: DT_DOUBLE
<a name="line-1690"></a>      type: DT_INT64
<a name="line-1691"></a>      type: DT_INT32
<a name="line-1692"></a>      type: DT_UINT8
<a name="line-1693"></a>      type: DT_UINT16
<a name="line-1694"></a>      type: DT_INT16
<a name="line-1695"></a>      type: DT_INT8
<a name="line-1696"></a>      type: DT_COMPLEX64
<a name="line-1697"></a>      type: DT_COMPLEX128
<a name="line-1698"></a>      type: DT_QINT8
<a name="line-1699"></a>      type: DT_QUINT8
<a name="line-1700"></a>      type: DT_QINT32
<a name="line-1701"></a>      type: DT_HALF
<a name="line-1702"></a>    }
<a name="line-1703"></a>  }
<a name="line-1704"></a>  name: "T"
<a name="line-1705"></a>  type: "type"
<a name="line-1706"></a>}
<a name="line-1707"></a>input_arg {
<a name="line-1708"></a>  description: "The original input tensor."
<a name="line-1709"></a>  name: "orig_input"
<a name="line-1710"></a>  type: DT_FLOAT
<a name="line-1711"></a>}
<a name="line-1712"></a>input_arg {
<a name="line-1713"></a>  description: "The original output tensor."
<a name="line-1714"></a>  name: "orig_output"
<a name="line-1715"></a>  type: DT_FLOAT
<a name="line-1716"></a>}
<a name="line-1717"></a>input_arg {
<a name="line-1718"></a>  description: "Output backprop of shape `[batch, depth, rows, cols, channels]`."
<a name="line-1719"></a>  name: "grad"
<a name="line-1720"></a>  type_attr: "T"
<a name="line-1721"></a>}
<a name="line-1722"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-1723"></a>-}</span>
<a name="line-1724"></a>
<a name="line-1725"></a><span class='hs-comment'>-- | Computes the gradients of 3-D convolution with respect to the filter.</span>
<a name="line-1726"></a>
<a name="line-1727"></a><a name="conv3DBackpropFilterV2"></a><span class='hs-definition'>conv3DBackpropFilterV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-1728"></a>                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-1729"></a>                                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-1730"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-1731"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1732"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-1733"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1734"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1735"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1736"></a>                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1737"></a>                          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.</span>
<a name="line-1738"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,</span>
<a name="line-1739"></a>                                                      <span class='hs-comment'>-- where `filter` is a 5-D</span>
<a name="line-1740"></a>                                                      <span class='hs-comment'>-- `[filter_depth, filter_height, filter_width, in_channels, out_channels]`</span>
<a name="line-1741"></a>                                                      <span class='hs-comment'>-- tensor.</span>
<a name="line-1742"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,</span>
<a name="line-1743"></a>                                         <span class='hs-comment'>-- out_channels]`.</span>
<a name="line-1744"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-1745"></a><span class='hs-definition'>conv3DBackpropFilterV2</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter_sizes</span> <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1746"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Conv3DBackpropFilterV2"</span>
<a name="line-1747"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1748"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter_sizes</span> <span class='hs-varid'>out_backprop</span>
<a name="line-1749"></a><span class='hs-comment'>{-
<a name="line-1750"></a>attr {
<a name="line-1751"></a>  allowed_values {
<a name="line-1752"></a>    list {
<a name="line-1753"></a>      type: DT_FLOAT
<a name="line-1754"></a>      type: DT_DOUBLE
<a name="line-1755"></a>      type: DT_INT64
<a name="line-1756"></a>      type: DT_INT32
<a name="line-1757"></a>      type: DT_UINT8
<a name="line-1758"></a>      type: DT_UINT16
<a name="line-1759"></a>      type: DT_INT16
<a name="line-1760"></a>      type: DT_INT8
<a name="line-1761"></a>      type: DT_COMPLEX64
<a name="line-1762"></a>      type: DT_COMPLEX128
<a name="line-1763"></a>      type: DT_QINT8
<a name="line-1764"></a>      type: DT_QUINT8
<a name="line-1765"></a>      type: DT_QINT32
<a name="line-1766"></a>      type: DT_HALF
<a name="line-1767"></a>    }
<a name="line-1768"></a>  }
<a name="line-1769"></a>  name: "T"
<a name="line-1770"></a>  type: "type"
<a name="line-1771"></a>}
<a name="line-1772"></a>attr {
<a name="line-1773"></a>  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
<a name="line-1774"></a>  has_minimum: true
<a name="line-1775"></a>  minimum: 5
<a name="line-1776"></a>  name: "strides"
<a name="line-1777"></a>  type: "list(int)"
<a name="line-1778"></a>}
<a name="line-1779"></a>attr {
<a name="line-1780"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-1781"></a>  description: "The type of padding algorithm to use."
<a name="line-1782"></a>  name: "padding"
<a name="line-1783"></a>  type: "string"
<a name="line-1784"></a>}
<a name="line-1785"></a>input_arg {
<a name="line-1786"></a>  description: "Shape `[batch, depth, rows, cols, in_channels]`."
<a name="line-1787"></a>  name: "input"
<a name="line-1788"></a>  type_attr: "T"
<a name="line-1789"></a>}
<a name="line-1790"></a>input_arg {
<a name="line-1791"></a>  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 5-D\n`[filter_depth, filter_height, filter_width, in_channels, out_channels]`\ntensor."
<a name="line-1792"></a>  name: "filter_sizes"
<a name="line-1793"></a>  type: DT_INT32
<a name="line-1794"></a>}
<a name="line-1795"></a>input_arg {
<a name="line-1796"></a>  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
<a name="line-1797"></a>  name: "out_backprop"
<a name="line-1798"></a>  type_attr: "T"
<a name="line-1799"></a>}
<a name="line-1800"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-1801"></a>-}</span>
<a name="line-1802"></a>
<a name="line-1803"></a><span class='hs-comment'>-- | Computes the gradients of 3-D convolution with respect to the filter.</span>
<a name="line-1804"></a>
<a name="line-1805"></a><a name="conv3DBackpropFilter"></a><span class='hs-definition'>conv3DBackpropFilter</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-1806"></a>                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-1807"></a>                                                     <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-1808"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-1809"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1810"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-1811"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1812"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1813"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1814"></a>                                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1815"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.</span>
<a name="line-1816"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.</span>
<a name="line-1817"></a>                                       <span class='hs-comment'>-- `in_channels` must match between `input` and `filter`.</span>
<a name="line-1818"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,</span>
<a name="line-1819"></a>                                       <span class='hs-comment'>-- out_channels]`.</span>
<a name="line-1820"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-1821"></a><span class='hs-definition'>conv3DBackpropFilter</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1822"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Conv3DBackpropFilter"</span>
<a name="line-1823"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1824"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span>
<a name="line-1825"></a><span class='hs-comment'>{-
<a name="line-1826"></a>attr {
<a name="line-1827"></a>  allowed_values {
<a name="line-1828"></a>    list {
<a name="line-1829"></a>      type: DT_FLOAT
<a name="line-1830"></a>      type: DT_DOUBLE
<a name="line-1831"></a>      type: DT_INT64
<a name="line-1832"></a>      type: DT_INT32
<a name="line-1833"></a>      type: DT_UINT8
<a name="line-1834"></a>      type: DT_UINT16
<a name="line-1835"></a>      type: DT_INT16
<a name="line-1836"></a>      type: DT_INT8
<a name="line-1837"></a>      type: DT_COMPLEX64
<a name="line-1838"></a>      type: DT_COMPLEX128
<a name="line-1839"></a>      type: DT_QINT8
<a name="line-1840"></a>      type: DT_QUINT8
<a name="line-1841"></a>      type: DT_QINT32
<a name="line-1842"></a>      type: DT_HALF
<a name="line-1843"></a>    }
<a name="line-1844"></a>  }
<a name="line-1845"></a>  name: "T"
<a name="line-1846"></a>  type: "type"
<a name="line-1847"></a>}
<a name="line-1848"></a>attr {
<a name="line-1849"></a>  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
<a name="line-1850"></a>  has_minimum: true
<a name="line-1851"></a>  minimum: 5
<a name="line-1852"></a>  name: "strides"
<a name="line-1853"></a>  type: "list(int)"
<a name="line-1854"></a>}
<a name="line-1855"></a>attr {
<a name="line-1856"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-1857"></a>  description: "The type of padding algorithm to use."
<a name="line-1858"></a>  name: "padding"
<a name="line-1859"></a>  type: "string"
<a name="line-1860"></a>}
<a name="line-1861"></a>input_arg {
<a name="line-1862"></a>  description: "Shape `[batch, depth, rows, cols, in_channels]`."
<a name="line-1863"></a>  name: "input"
<a name="line-1864"></a>  type_attr: "T"
<a name="line-1865"></a>}
<a name="line-1866"></a>input_arg {
<a name="line-1867"></a>  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
<a name="line-1868"></a>  name: "filter"
<a name="line-1869"></a>  type_attr: "T"
<a name="line-1870"></a>}
<a name="line-1871"></a>input_arg {
<a name="line-1872"></a>  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
<a name="line-1873"></a>  name: "out_backprop"
<a name="line-1874"></a>  type_attr: "T"
<a name="line-1875"></a>}
<a name="line-1876"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-1877"></a>-}</span>
<a name="line-1878"></a>
<a name="line-1879"></a><a name="conv3D"></a><span class='hs-comment'>-- | Computes a 3-D convolution given 5-D `input` and `filter` tensors.</span>
<a name="line-1880"></a><span class='hs-comment'>--</span>
<a name="line-1881"></a><span class='hs-comment'>-- In signal processing, cross-correlation is a measure of similarity of</span>
<a name="line-1882"></a><span class='hs-comment'>-- two waveforms as a function of a time-lag applied to one of them. This</span>
<a name="line-1883"></a><span class='hs-comment'>-- is also known as a sliding dot product or sliding inner-product.</span>
<a name="line-1884"></a><span class='hs-comment'>-- </span>
<a name="line-1885"></a><span class='hs-comment'>-- Our Conv3D implements a form of cross-correlation.</span>
<a name="line-1886"></a><span class='hs-definition'>conv3D</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-1887"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-1888"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-1889"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-1890"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-1891"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-1892"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1893"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1894"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Shape `[batch, in_depth, in_height, in_width, in_channels]`.</span>
<a name="line-1895"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: Shape `[filter_depth, filter_height, filter_width, in_channels,</span>
<a name="line-1896"></a>                         <span class='hs-comment'>-- out_channels]`. `in_channels` must match between `input` and `filter`.</span>
<a name="line-1897"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-1898"></a><span class='hs-definition'>conv3D</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1899"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Conv3D"</span>
<a name="line-1900"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1901"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span>
<a name="line-1902"></a><span class='hs-comment'>{-
<a name="line-1903"></a>attr {
<a name="line-1904"></a>  allowed_values {
<a name="line-1905"></a>    list {
<a name="line-1906"></a>      type: DT_FLOAT
<a name="line-1907"></a>      type: DT_DOUBLE
<a name="line-1908"></a>      type: DT_INT64
<a name="line-1909"></a>      type: DT_INT32
<a name="line-1910"></a>      type: DT_UINT8
<a name="line-1911"></a>      type: DT_UINT16
<a name="line-1912"></a>      type: DT_INT16
<a name="line-1913"></a>      type: DT_INT8
<a name="line-1914"></a>      type: DT_COMPLEX64
<a name="line-1915"></a>      type: DT_COMPLEX128
<a name="line-1916"></a>      type: DT_QINT8
<a name="line-1917"></a>      type: DT_QUINT8
<a name="line-1918"></a>      type: DT_QINT32
<a name="line-1919"></a>      type: DT_HALF
<a name="line-1920"></a>    }
<a name="line-1921"></a>  }
<a name="line-1922"></a>  name: "T"
<a name="line-1923"></a>  type: "type"
<a name="line-1924"></a>}
<a name="line-1925"></a>attr {
<a name="line-1926"></a>  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
<a name="line-1927"></a>  has_minimum: true
<a name="line-1928"></a>  minimum: 5
<a name="line-1929"></a>  name: "strides"
<a name="line-1930"></a>  type: "list(int)"
<a name="line-1931"></a>}
<a name="line-1932"></a>attr {
<a name="line-1933"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-1934"></a>  description: "The type of padding algorithm to use."
<a name="line-1935"></a>  name: "padding"
<a name="line-1936"></a>  type: "string"
<a name="line-1937"></a>}
<a name="line-1938"></a>input_arg {
<a name="line-1939"></a>  description: "Shape `[batch, in_depth, in_height, in_width, in_channels]`."
<a name="line-1940"></a>  name: "input"
<a name="line-1941"></a>  type_attr: "T"
<a name="line-1942"></a>}
<a name="line-1943"></a>input_arg {
<a name="line-1944"></a>  description: "Shape `[filter_depth, filter_height, filter_width, in_channels,\nout_channels]`. `in_channels` must match between `input` and `filter`."
<a name="line-1945"></a>  name: "filter"
<a name="line-1946"></a>  type_attr: "T"
<a name="line-1947"></a>}
<a name="line-1948"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-1949"></a>-}</span>
<a name="line-1950"></a>
<a name="line-1951"></a><span class='hs-comment'>-- | Computes the gradients of depthwise convolution with respect to the filter.</span>
<a name="line-1952"></a>
<a name="line-1953"></a><a name="depthwiseConv2dNativeBackpropFilter"></a><span class='hs-definition'>depthwiseConv2dNativeBackpropFilter</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-1954"></a>                                                            <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-1955"></a>                                                                    <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-1956"></a>                                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.</span>
<a name="line-1957"></a>                                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,</span>
<a name="line-1958"></a>                                                                   <span class='hs-comment'>-- where `filter` is a 4-D</span>
<a name="line-1959"></a>                                                                   <span class='hs-comment'>-- `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.</span>
<a name="line-1960"></a>                                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.</span>
<a name="line-1961"></a>                                                      <span class='hs-comment'>-- Gradients w.r.t. the output of the convolution.</span>
<a name="line-1962"></a>                                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D with shape</span>
<a name="line-1963"></a>                                       <span class='hs-comment'>-- `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.</span>
<a name="line-1964"></a>                                       <span class='hs-comment'>-- the `filter` input of the convolution.</span>
<a name="line-1965"></a><span class='hs-definition'>depthwiseConv2dNativeBackpropFilter</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter_sizes</span>
<a name="line-1966"></a>                                    <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-1967"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DepthwiseConv2dNativeBackpropFilter"</span>
<a name="line-1968"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-1969"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter_sizes</span> <span class='hs-varid'>out_backprop</span>
<a name="line-1970"></a><span class='hs-comment'>{-
<a name="line-1971"></a>attr {
<a name="line-1972"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-1973"></a>  name: "T"
<a name="line-1974"></a>  type: "type"
<a name="line-1975"></a>}
<a name="line-1976"></a>attr {
<a name="line-1977"></a>  description: "The stride of the sliding window for each dimension of the input\nof the convolution."
<a name="line-1978"></a>  name: "strides"
<a name="line-1979"></a>  type: "list(int)"
<a name="line-1980"></a>}
<a name="line-1981"></a>attr {
<a name="line-1982"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-1983"></a>  description: "The type of padding algorithm to use."
<a name="line-1984"></a>  name: "padding"
<a name="line-1985"></a>  type: "string"
<a name="line-1986"></a>}
<a name="line-1987"></a>input_arg {
<a name="line-1988"></a>  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
<a name="line-1989"></a>  name: "input"
<a name="line-1990"></a>  type_attr: "T"
<a name="line-1991"></a>}
<a name="line-1992"></a>input_arg {
<a name="line-1993"></a>  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor."
<a name="line-1994"></a>  name: "filter_sizes"
<a name="line-1995"></a>  type: DT_INT32
<a name="line-1996"></a>}
<a name="line-1997"></a>input_arg {
<a name="line-1998"></a>  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
<a name="line-1999"></a>  name: "out_backprop"
<a name="line-2000"></a>  type_attr: "T"
<a name="line-2001"></a>}
<a name="line-2002"></a>output_arg {
<a name="line-2003"></a>  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.\nthe `filter` input of the convolution."
<a name="line-2004"></a>  name: "output"
<a name="line-2005"></a>  type_attr: "T"
<a name="line-2006"></a>}
<a name="line-2007"></a>-}</span>
<a name="line-2008"></a>
<a name="line-2009"></a><span class='hs-comment'>-- | Computes the gradients of convolution with respect to the filter.</span>
<a name="line-2010"></a>
<a name="line-2011"></a><a name="conv2DBackpropFilter"></a><span class='hs-definition'>conv2DBackpropFilter</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-2012"></a>                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-2013"></a>                                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2014"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.</span>
<a name="line-2015"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,</span>
<a name="line-2016"></a>                                                    <span class='hs-comment'>-- where `filter` is a 4-D</span>
<a name="line-2017"></a>                                                    <span class='hs-comment'>-- `[filter_height, filter_width, in_channels, out_channels]` tensor.</span>
<a name="line-2018"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.</span>
<a name="line-2019"></a>                                       <span class='hs-comment'>-- Gradients w.r.t. the output of the convolution.</span>
<a name="line-2020"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D with shape</span>
<a name="line-2021"></a>                        <span class='hs-comment'>-- `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.</span>
<a name="line-2022"></a>                        <span class='hs-comment'>-- the `filter` input of the convolution.</span>
<a name="line-2023"></a><span class='hs-definition'>conv2DBackpropFilter</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter_sizes</span> <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2024"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Conv2DBackpropFilter"</span>
<a name="line-2025"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-2026"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter_sizes</span> <span class='hs-varid'>out_backprop</span>
<a name="line-2027"></a><span class='hs-comment'>{-
<a name="line-2028"></a>attr {
<a name="line-2029"></a>  allowed_values {
<a name="line-2030"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-2031"></a>  }
<a name="line-2032"></a>  name: "T"
<a name="line-2033"></a>  type: "type"
<a name="line-2034"></a>}
<a name="line-2035"></a>attr {
<a name="line-2036"></a>  description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat."
<a name="line-2037"></a>  name: "strides"
<a name="line-2038"></a>  type: "list(int)"
<a name="line-2039"></a>}
<a name="line-2040"></a>attr {
<a name="line-2041"></a>  default_value { b: true } name: "use_cudnn_on_gpu" type: "bool"
<a name="line-2042"></a>}
<a name="line-2043"></a>attr {
<a name="line-2044"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-2045"></a>  description: "The type of padding algorithm to use."
<a name="line-2046"></a>  name: "padding"
<a name="line-2047"></a>  type: "string"
<a name="line-2048"></a>}
<a name="line-2049"></a>attr {
<a name="line-2050"></a>  allowed_values { list { s: "NHWC" s: "NCHW" } }
<a name="line-2051"></a>  default_value { s: "NHWC" }
<a name="line-2052"></a>  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
<a name="line-2053"></a>  name: "data_format"
<a name="line-2054"></a>  type: "string"
<a name="line-2055"></a>}
<a name="line-2056"></a>input_arg {
<a name="line-2057"></a>  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
<a name="line-2058"></a>  name: "input"
<a name="line-2059"></a>  type_attr: "T"
<a name="line-2060"></a>}
<a name="line-2061"></a>input_arg {
<a name="line-2062"></a>  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, out_channels]` tensor."
<a name="line-2063"></a>  name: "filter_sizes"
<a name="line-2064"></a>  type: DT_INT32
<a name="line-2065"></a>}
<a name="line-2066"></a>input_arg {
<a name="line-2067"></a>  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
<a name="line-2068"></a>  name: "out_backprop"
<a name="line-2069"></a>  type_attr: "T"
<a name="line-2070"></a>}
<a name="line-2071"></a>output_arg {
<a name="line-2072"></a>  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.\nthe `filter` input of the convolution."
<a name="line-2073"></a>  name: "output"
<a name="line-2074"></a>  type_attr: "T"
<a name="line-2075"></a>}
<a name="line-2076"></a>-}</span>
<a name="line-2077"></a>
<a name="line-2078"></a><span class='hs-comment'>-- | Computes the gradients of convolution with respect to the input.</span>
<a name="line-2079"></a>
<a name="line-2080"></a><a name="conv2DBackpropInput"></a><span class='hs-definition'>conv2DBackpropInput</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-2081"></a>                                            <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-2082"></a>                                                    <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2083"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __input_sizes__: An integer vector representing the shape of `input`,</span>
<a name="line-2084"></a>                                                <span class='hs-comment'>-- where `input` is a 4-D `[batch, height, width, channels]` tensor.</span>
<a name="line-2085"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: 4-D with shape</span>
<a name="line-2086"></a>                                      <span class='hs-comment'>-- `[filter_height, filter_width, in_channels, out_channels]`.</span>
<a name="line-2087"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.</span>
<a name="line-2088"></a>                                      <span class='hs-comment'>-- Gradients w.r.t. the output of the convolution.</span>
<a name="line-2089"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient</span>
<a name="line-2090"></a>                       <span class='hs-comment'>-- w.r.t. the input of the convolution.</span>
<a name="line-2091"></a><span class='hs-definition'>conv2DBackpropInput</span> <span class='hs-varid'>input_sizes</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2092"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Conv2DBackpropInput"</span>
<a name="line-2093"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-2094"></a>        <span class='hs-varid'>input_sizes</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span>
<a name="line-2095"></a><span class='hs-comment'>{-
<a name="line-2096"></a>attr {
<a name="line-2097"></a>  allowed_values {
<a name="line-2098"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-2099"></a>  }
<a name="line-2100"></a>  name: "T"
<a name="line-2101"></a>  type: "type"
<a name="line-2102"></a>}
<a name="line-2103"></a>attr {
<a name="line-2104"></a>  description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat."
<a name="line-2105"></a>  name: "strides"
<a name="line-2106"></a>  type: "list(int)"
<a name="line-2107"></a>}
<a name="line-2108"></a>attr {
<a name="line-2109"></a>  default_value { b: true } name: "use_cudnn_on_gpu" type: "bool"
<a name="line-2110"></a>}
<a name="line-2111"></a>attr {
<a name="line-2112"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-2113"></a>  description: "The type of padding algorithm to use."
<a name="line-2114"></a>  name: "padding"
<a name="line-2115"></a>  type: "string"
<a name="line-2116"></a>}
<a name="line-2117"></a>attr {
<a name="line-2118"></a>  allowed_values { list { s: "NHWC" s: "NCHW" } }
<a name="line-2119"></a>  default_value { s: "NHWC" }
<a name="line-2120"></a>  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
<a name="line-2121"></a>  name: "data_format"
<a name="line-2122"></a>  type: "string"
<a name="line-2123"></a>}
<a name="line-2124"></a>input_arg {
<a name="line-2125"></a>  description: "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor."
<a name="line-2126"></a>  name: "input_sizes"
<a name="line-2127"></a>  type: DT_INT32
<a name="line-2128"></a>}
<a name="line-2129"></a>input_arg {
<a name="line-2130"></a>  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
<a name="line-2131"></a>  name: "filter"
<a name="line-2132"></a>  type_attr: "T"
<a name="line-2133"></a>}
<a name="line-2134"></a>input_arg {
<a name="line-2135"></a>  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
<a name="line-2136"></a>  name: "out_backprop"
<a name="line-2137"></a>  type_attr: "T"
<a name="line-2138"></a>}
<a name="line-2139"></a>output_arg {
<a name="line-2140"></a>  description: "4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient\nw.r.t. the input of the convolution."
<a name="line-2141"></a>  name: "output"
<a name="line-2142"></a>  type_attr: "T"
<a name="line-2143"></a>}
<a name="line-2144"></a>-}</span>
<a name="line-2145"></a>
<a name="line-2146"></a><a name="conv2D"></a><span class='hs-comment'>-- | Computes a 2-D convolution given 4-D `input` and `filter` tensors.</span>
<a name="line-2147"></a><span class='hs-comment'>--</span>
<a name="line-2148"></a><span class='hs-comment'>-- Given an input tensor of shape `[batch, in_height, in_width, in_channels]`</span>
<a name="line-2149"></a><span class='hs-comment'>-- and a filter / kernel tensor of shape</span>
<a name="line-2150"></a><span class='hs-comment'>-- `[filter_height, filter_width, in_channels, out_channels]`, this op</span>
<a name="line-2151"></a><span class='hs-comment'>-- performs the following:</span>
<a name="line-2152"></a><span class='hs-comment'>-- </span>
<a name="line-2153"></a><span class='hs-comment'>-- 1. Flattens the filter to a 2-D matrix with shape</span>
<a name="line-2154"></a><span class='hs-comment'>--    `[filter_height * filter_width * in_channels, output_channels]`.</span>
<a name="line-2155"></a><span class='hs-comment'>-- 2. Extracts image patches from the input tensor to form a *virtual*</span>
<a name="line-2156"></a><span class='hs-comment'>--    tensor of shape `[batch, out_height, out_width,</span>
<a name="line-2157"></a><span class='hs-comment'>--    filter_height * filter_width * in_channels]`.</span>
<a name="line-2158"></a><span class='hs-comment'>-- 3. For each patch, right-multiplies the filter matrix and the image patch</span>
<a name="line-2159"></a><span class='hs-comment'>--    vector.</span>
<a name="line-2160"></a><span class='hs-comment'>-- </span>
<a name="line-2161"></a><span class='hs-comment'>-- In detail, with the default NHWC format,</span>
<a name="line-2162"></a><span class='hs-comment'>-- </span>
<a name="line-2163"></a><span class='hs-comment'>--     output[b, i, j, k] =</span>
<a name="line-2164"></a><span class='hs-comment'>--         sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *</span>
<a name="line-2165"></a><span class='hs-comment'>--                         filter[di, dj, q, k]</span>
<a name="line-2166"></a><span class='hs-comment'>-- </span>
<a name="line-2167"></a><span class='hs-comment'>-- Must have `strides[0] = strides[3] = 1`.  For the most common case of the same</span>
<a name="line-2168"></a><span class='hs-comment'>-- horizontal and vertices strides, `strides = [1, stride, stride, 1]`.</span>
<a name="line-2169"></a><span class='hs-definition'>conv2D</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-2170"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2171"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-2172"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__</span>
<a name="line-2173"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-2174"></a><span class='hs-definition'>conv2D</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2175"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Conv2D"</span>
<a name="line-2176"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-2177"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span>
<a name="line-2178"></a><span class='hs-comment'>{-
<a name="line-2179"></a>attr {
<a name="line-2180"></a>  allowed_values {
<a name="line-2181"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-2182"></a>  }
<a name="line-2183"></a>  name: "T"
<a name="line-2184"></a>  type: "type"
<a name="line-2185"></a>}
<a name="line-2186"></a>attr {
<a name="line-2187"></a>  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format."
<a name="line-2188"></a>  name: "strides"
<a name="line-2189"></a>  type: "list(int)"
<a name="line-2190"></a>}
<a name="line-2191"></a>attr {
<a name="line-2192"></a>  default_value { b: true } name: "use_cudnn_on_gpu" type: "bool"
<a name="line-2193"></a>}
<a name="line-2194"></a>attr {
<a name="line-2195"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-2196"></a>  description: "The type of padding algorithm to use."
<a name="line-2197"></a>  name: "padding"
<a name="line-2198"></a>  type: "string"
<a name="line-2199"></a>}
<a name="line-2200"></a>attr {
<a name="line-2201"></a>  allowed_values { list { s: "NHWC" s: "NCHW" } }
<a name="line-2202"></a>  default_value { s: "NHWC" }
<a name="line-2203"></a>  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
<a name="line-2204"></a>  name: "data_format"
<a name="line-2205"></a>  type: "string"
<a name="line-2206"></a>}
<a name="line-2207"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-2208"></a>input_arg { name: "filter" type_attr: "T" }
<a name="line-2209"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-2210"></a>-}</span>
<a name="line-2211"></a>
<a name="line-2212"></a><a name="biasAdd"></a><span class='hs-comment'>-- | Adds `bias` to `value`.</span>
<a name="line-2213"></a><span class='hs-comment'>--</span>
<a name="line-2214"></a><span class='hs-comment'>-- This is a special case of `tf.add` where `bias` is restricted to be 1-D.</span>
<a name="line-2215"></a><span class='hs-comment'>-- Broadcasting is supported, so `value` may have any number of dimensions.</span>
<a name="line-2216"></a><span class='hs-definition'>biasAdd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-2217"></a>                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-2218"></a>                                     <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-2219"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2220"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-2221"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-2222"></a>                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2223"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: Any number of dimensions.</span>
<a name="line-2224"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __bias__: 1-D with size the last dimension of `value`.</span>
<a name="line-2225"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Broadcasted sum of `value` and `bias`.</span>
<a name="line-2226"></a><span class='hs-definition'>biasAdd</span> <span class='hs-varid'>value</span> <span class='hs-varid'>bias</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2227"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BiasAdd"</span>
<a name="line-2228"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-2229"></a>        <span class='hs-varid'>value</span> <span class='hs-varid'>bias</span>
<a name="line-2230"></a><span class='hs-comment'>{-
<a name="line-2231"></a>attr {
<a name="line-2232"></a>  allowed_values {
<a name="line-2233"></a>    list {
<a name="line-2234"></a>      type: DT_FLOAT
<a name="line-2235"></a>      type: DT_DOUBLE
<a name="line-2236"></a>      type: DT_INT64
<a name="line-2237"></a>      type: DT_INT32
<a name="line-2238"></a>      type: DT_UINT8
<a name="line-2239"></a>      type: DT_UINT16
<a name="line-2240"></a>      type: DT_INT16
<a name="line-2241"></a>      type: DT_INT8
<a name="line-2242"></a>      type: DT_COMPLEX64
<a name="line-2243"></a>      type: DT_COMPLEX128
<a name="line-2244"></a>      type: DT_QINT8
<a name="line-2245"></a>      type: DT_QUINT8
<a name="line-2246"></a>      type: DT_QINT32
<a name="line-2247"></a>      type: DT_HALF
<a name="line-2248"></a>    }
<a name="line-2249"></a>  }
<a name="line-2250"></a>  name: "T"
<a name="line-2251"></a>  type: "type"
<a name="line-2252"></a>}
<a name="line-2253"></a>attr {
<a name="line-2254"></a>  allowed_values { list { s: "NHWC" s: "NCHW" } }
<a name="line-2255"></a>  default_value { s: "NHWC" }
<a name="line-2256"></a>  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n    dimension."
<a name="line-2257"></a>  name: "data_format"
<a name="line-2258"></a>  type: "string"
<a name="line-2259"></a>}
<a name="line-2260"></a>input_arg {
<a name="line-2261"></a>  description: "Any number of dimensions."
<a name="line-2262"></a>  name: "value"
<a name="line-2263"></a>  type_attr: "T"
<a name="line-2264"></a>}
<a name="line-2265"></a>input_arg {
<a name="line-2266"></a>  description: "1-D with size the last dimension of `value`."
<a name="line-2267"></a>  name: "bias"
<a name="line-2268"></a>  type_attr: "T"
<a name="line-2269"></a>}
<a name="line-2270"></a>output_arg {
<a name="line-2271"></a>  description: "Broadcasted sum of `value` and `bias`."
<a name="line-2272"></a>  name: "output"
<a name="line-2273"></a>  type_attr: "T"
<a name="line-2274"></a>}
<a name="line-2275"></a>-}</span>
<a name="line-2276"></a>
<a name="line-2277"></a><a name="fusedBatchNorm"></a><span class='hs-comment'>-- | Batch normalization.</span>
<a name="line-2278"></a><span class='hs-comment'>--</span>
<a name="line-2279"></a><span class='hs-comment'>-- Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".</span>
<a name="line-2280"></a><span class='hs-comment'>-- The size of 1D Tensors matches the dimension C of the 4D Tensors.</span>
<a name="line-2281"></a><span class='hs-definition'>fusedBatchNorm</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-2282"></a>                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-2283"></a>                                                     <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-2284"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-2285"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2286"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-2287"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-2288"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-2289"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-2290"></a>                                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2291"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__: A 4D Tensor for input data.</span>
<a name="line-2292"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x.</span>
<a name="line-2293"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __offset__: A 1D Tensor for offset, to shift to the normalized x.</span>
<a name="line-2294"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __mean__: A 1D Tensor for population mean. Used for inference only;</span>
<a name="line-2295"></a>                                 <span class='hs-comment'>-- must be empty for training.</span>
<a name="line-2296"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __variance__: A 1D Tensor for population variance. Used for inference only;</span>
<a name="line-2297"></a>                                 <span class='hs-comment'>-- must be empty for training.</span>
<a name="line-2298"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-2299"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-2300"></a>                  <span class='hs-comment'>-- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__)</span>
<a name="line-2301"></a>                  <span class='hs-comment'>--</span>
<a name="line-2302"></a>                  <span class='hs-comment'>-- * __y__: A 4D Tensor for output data.</span>
<a name="line-2303"></a>                  <span class='hs-comment'>--</span>
<a name="line-2304"></a>                  <span class='hs-comment'>-- * __batch_mean__: A 1D Tensor for the computed batch mean, to be used by TensorFlow</span>
<a name="line-2305"></a>                  <span class='hs-comment'>-- to compute the running mean.</span>
<a name="line-2306"></a>                  <span class='hs-comment'>--</span>
<a name="line-2307"></a>                  <span class='hs-comment'>-- * __batch_variance__: A 1D Tensor for the computed batch variance, to be used by</span>
<a name="line-2308"></a>                  <span class='hs-comment'>-- TensorFlow to compute the running variance.</span>
<a name="line-2309"></a>                  <span class='hs-comment'>--</span>
<a name="line-2310"></a>                  <span class='hs-comment'>-- * __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused</span>
<a name="line-2311"></a>                  <span class='hs-comment'>-- in the gradient computation.</span>
<a name="line-2312"></a>                  <span class='hs-comment'>--</span>
<a name="line-2313"></a>                  <span class='hs-comment'>-- * __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance</span>
<a name="line-2314"></a>                  <span class='hs-comment'>-- in the cuDNN case), to be used in the gradient computation.</span>
<a name="line-2315"></a><span class='hs-definition'>fusedBatchNorm</span> <span class='hs-varid'>x</span> <span class='hs-varid'>scale</span> <span class='hs-varid'>offset</span> <span class='hs-varid'>mean</span> <span class='hs-varid'>variance</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2316"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FusedBatchNorm"</span>
<a name="line-2317"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-2318"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>scale</span> <span class='hs-varid'>offset</span> <span class='hs-varid'>mean</span> <span class='hs-varid'>variance</span>
<a name="line-2319"></a><span class='hs-comment'>{-
<a name="line-2320"></a>attr {
<a name="line-2321"></a>  allowed_values {
<a name="line-2322"></a>    list {
<a name="line-2323"></a>      type: DT_FLOAT
<a name="line-2324"></a>      type: DT_DOUBLE
<a name="line-2325"></a>      type: DT_INT64
<a name="line-2326"></a>      type: DT_INT32
<a name="line-2327"></a>      type: DT_UINT8
<a name="line-2328"></a>      type: DT_UINT16
<a name="line-2329"></a>      type: DT_INT16
<a name="line-2330"></a>      type: DT_INT8
<a name="line-2331"></a>      type: DT_COMPLEX64
<a name="line-2332"></a>      type: DT_COMPLEX128
<a name="line-2333"></a>      type: DT_QINT8
<a name="line-2334"></a>      type: DT_QUINT8
<a name="line-2335"></a>      type: DT_QINT32
<a name="line-2336"></a>      type: DT_HALF
<a name="line-2337"></a>    }
<a name="line-2338"></a>  }
<a name="line-2339"></a>  description: "The data type for the elements of input and output Tensors."
<a name="line-2340"></a>  name: "T"
<a name="line-2341"></a>  type: "type"
<a name="line-2342"></a>}
<a name="line-2343"></a>attr {
<a name="line-2344"></a>  default_value { f: 1.0e-4 }
<a name="line-2345"></a>  description: "A small float number added to the variance of x."
<a name="line-2346"></a>  name: "epsilon"
<a name="line-2347"></a>  type: "float"
<a name="line-2348"></a>}
<a name="line-2349"></a>attr {
<a name="line-2350"></a>  default_value { s: "NHWC" }
<a name="line-2351"></a>  description: "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\"."
<a name="line-2352"></a>  name: "data_format"
<a name="line-2353"></a>  type: "string"
<a name="line-2354"></a>}
<a name="line-2355"></a>attr {
<a name="line-2356"></a>  default_value { b: true }
<a name="line-2357"></a>  description: "A bool value to indicate the operation is for training (default)\nor inference."
<a name="line-2358"></a>  name: "is_training"
<a name="line-2359"></a>  type: "bool"
<a name="line-2360"></a>}
<a name="line-2361"></a>input_arg {
<a name="line-2362"></a>  description: "A 4D Tensor for input data." name: "x" type_attr: "T"
<a name="line-2363"></a>}
<a name="line-2364"></a>input_arg {
<a name="line-2365"></a>  description: "A 1D Tensor for scaling factor, to scale the normalized x."
<a name="line-2366"></a>  name: "scale"
<a name="line-2367"></a>  type_attr: "T"
<a name="line-2368"></a>}
<a name="line-2369"></a>input_arg {
<a name="line-2370"></a>  description: "A 1D Tensor for offset, to shift to the normalized x."
<a name="line-2371"></a>  name: "offset"
<a name="line-2372"></a>  type_attr: "T"
<a name="line-2373"></a>}
<a name="line-2374"></a>input_arg {
<a name="line-2375"></a>  description: "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training."
<a name="line-2376"></a>  name: "mean"
<a name="line-2377"></a>  type_attr: "T"
<a name="line-2378"></a>}
<a name="line-2379"></a>input_arg {
<a name="line-2380"></a>  description: "A 1D Tensor for population variance. Used for inference only;\nmust be empty for training."
<a name="line-2381"></a>  name: "variance"
<a name="line-2382"></a>  type_attr: "T"
<a name="line-2383"></a>}
<a name="line-2384"></a>output_arg {
<a name="line-2385"></a>  description: "A 4D Tensor for output data."
<a name="line-2386"></a>  name: "y"
<a name="line-2387"></a>  type_attr: "T"
<a name="line-2388"></a>}
<a name="line-2389"></a>output_arg {
<a name="line-2390"></a>  description: "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean."
<a name="line-2391"></a>  name: "batch_mean"
<a name="line-2392"></a>  type_attr: "T"
<a name="line-2393"></a>}
<a name="line-2394"></a>output_arg {
<a name="line-2395"></a>  description: "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance."
<a name="line-2396"></a>  name: "batch_variance"
<a name="line-2397"></a>  type_attr: "T"
<a name="line-2398"></a>}
<a name="line-2399"></a>output_arg {
<a name="line-2400"></a>  description: "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation."
<a name="line-2401"></a>  name: "reserve_space_1"
<a name="line-2402"></a>  type_attr: "T"
<a name="line-2403"></a>}
<a name="line-2404"></a>output_arg {
<a name="line-2405"></a>  description: "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be used in the gradient computation."
<a name="line-2406"></a>  name: "reserve_space_2"
<a name="line-2407"></a>  type_attr: "T"
<a name="line-2408"></a>}
<a name="line-2409"></a>-}</span>
<a name="line-2410"></a>
<a name="line-2411"></a><a name="batchNormWithGlobalNormalizationGrad"></a><span class='hs-comment'>-- | Gradients for batch normalization.</span>
<a name="line-2412"></a><span class='hs-comment'>--</span>
<a name="line-2413"></a><span class='hs-comment'>-- This op is deprecated. See `tf.nn.batch_normalization`.</span>
<a name="line-2414"></a><span class='hs-definition'>batchNormWithGlobalNormalizationGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-2415"></a>                                                                   <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-2416"></a>                                                                           <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-2417"></a>                                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-2418"></a>                                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2419"></a>                                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-2420"></a>                                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-2421"></a>                                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-2422"></a>                                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-2423"></a>                                                                           <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-2424"></a>                                                                           <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2425"></a>                                        <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor</span>
<a name="line-2426"></a>                                             <span class='hs-comment'>-- needs to be multiplied with gamma.</span>
<a name="line-2427"></a>                                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __variance_epsilon__: A small float number to avoid dividing by 0.</span>
<a name="line-2428"></a>                                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __t__: A 4D input Tensor.</span>
<a name="line-2429"></a>                                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.</span>
<a name="line-2430"></a>                                                       <span class='hs-comment'>-- This is the first output from tf.nn.moments,</span>
<a name="line-2431"></a>                                                       <span class='hs-comment'>-- or a saved moving average thereof.</span>
<a name="line-2432"></a>                                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.</span>
<a name="line-2433"></a>                                                       <span class='hs-comment'>-- This is the second output from tf.nn.moments,</span>
<a name="line-2434"></a>                                                       <span class='hs-comment'>-- or a saved moving average thereof.</span>
<a name="line-2435"></a>                                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.</span>
<a name="line-2436"></a>                                                       <span class='hs-comment'>-- If "scale_after_normalization" is true, this Tensor will be multiplied</span>
<a name="line-2437"></a>                                                       <span class='hs-comment'>-- with the normalized Tensor.</span>
<a name="line-2438"></a>                                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __backprop__: 4D backprop Tensor.</span>
<a name="line-2439"></a>                                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-2440"></a>                                            <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-2441"></a>                                            <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-2442"></a>                                        <span class='hs-comment'>-- ^ (__dx__, __dm__, __dv__, __db__, __dg__)</span>
<a name="line-2443"></a>                                        <span class='hs-comment'>--</span>
<a name="line-2444"></a>                                        <span class='hs-comment'>-- * __dx__: 4D backprop tensor for input.</span>
<a name="line-2445"></a>                                        <span class='hs-comment'>--</span>
<a name="line-2446"></a>                                        <span class='hs-comment'>-- * __dm__: 1D backprop tensor for mean.</span>
<a name="line-2447"></a>                                        <span class='hs-comment'>--</span>
<a name="line-2448"></a>                                        <span class='hs-comment'>-- * __dv__: 1D backprop tensor for variance.</span>
<a name="line-2449"></a>                                        <span class='hs-comment'>--</span>
<a name="line-2450"></a>                                        <span class='hs-comment'>-- * __db__: 1D backprop tensor for beta.</span>
<a name="line-2451"></a>                                        <span class='hs-comment'>--</span>
<a name="line-2452"></a>                                        <span class='hs-comment'>-- * __dg__: 1D backprop tensor for gamma.</span>
<a name="line-2453"></a><span class='hs-definition'>batchNormWithGlobalNormalizationGrad</span> <span class='hs-varid'>scale_after_normalization</span> <span class='hs-varid'>variance_epsilon</span>
<a name="line-2454"></a>                                     <span class='hs-varid'>t</span> <span class='hs-varid'>m</span> <span class='hs-varid'>v</span> <span class='hs-varid'>gamma</span> <span class='hs-varid'>backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2455"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchNormWithGlobalNormalizationGrad"</span>
<a name="line-2456"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-2457"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"scale_after_normalization"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>scale_after_normalization</span>
<a name="line-2458"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"variance_epsilon"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>variance_epsilon</span><span class='hs-layout'>)</span>
<a name="line-2459"></a>        <span class='hs-varid'>t</span> <span class='hs-varid'>m</span> <span class='hs-varid'>v</span> <span class='hs-varid'>gamma</span> <span class='hs-varid'>backprop</span>
<a name="line-2460"></a><span class='hs-comment'>{-
<a name="line-2461"></a>attr {
<a name="line-2462"></a>  allowed_values {
<a name="line-2463"></a>    list {
<a name="line-2464"></a>      type: DT_FLOAT
<a name="line-2465"></a>      type: DT_DOUBLE
<a name="line-2466"></a>      type: DT_INT64
<a name="line-2467"></a>      type: DT_INT32
<a name="line-2468"></a>      type: DT_UINT8
<a name="line-2469"></a>      type: DT_UINT16
<a name="line-2470"></a>      type: DT_INT16
<a name="line-2471"></a>      type: DT_INT8
<a name="line-2472"></a>      type: DT_COMPLEX64
<a name="line-2473"></a>      type: DT_COMPLEX128
<a name="line-2474"></a>      type: DT_QINT8
<a name="line-2475"></a>      type: DT_QUINT8
<a name="line-2476"></a>      type: DT_QINT32
<a name="line-2477"></a>      type: DT_HALF
<a name="line-2478"></a>    }
<a name="line-2479"></a>  }
<a name="line-2480"></a>  name: "T"
<a name="line-2481"></a>  type: "type"
<a name="line-2482"></a>}
<a name="line-2483"></a>attr {
<a name="line-2484"></a>  description: "A small float number to avoid dividing by 0."
<a name="line-2485"></a>  name: "variance_epsilon"
<a name="line-2486"></a>  type: "float"
<a name="line-2487"></a>}
<a name="line-2488"></a>attr {
<a name="line-2489"></a>  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
<a name="line-2490"></a>  name: "scale_after_normalization"
<a name="line-2491"></a>  type: "bool"
<a name="line-2492"></a>}
<a name="line-2493"></a>input_arg {
<a name="line-2494"></a>  description: "A 4D input Tensor." name: "t" type_attr: "T"
<a name="line-2495"></a>}
<a name="line-2496"></a>input_arg {
<a name="line-2497"></a>  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
<a name="line-2498"></a>  name: "m"
<a name="line-2499"></a>  type_attr: "T"
<a name="line-2500"></a>}
<a name="line-2501"></a>input_arg {
<a name="line-2502"></a>  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
<a name="line-2503"></a>  name: "v"
<a name="line-2504"></a>  type_attr: "T"
<a name="line-2505"></a>}
<a name="line-2506"></a>input_arg {
<a name="line-2507"></a>  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this Tensor will be multiplied\nwith the normalized Tensor."
<a name="line-2508"></a>  name: "gamma"
<a name="line-2509"></a>  type_attr: "T"
<a name="line-2510"></a>}
<a name="line-2511"></a>input_arg {
<a name="line-2512"></a>  description: "4D backprop Tensor." name: "backprop" type_attr: "T"
<a name="line-2513"></a>}
<a name="line-2514"></a>output_arg {
<a name="line-2515"></a>  description: "4D backprop tensor for input."
<a name="line-2516"></a>  name: "dx"
<a name="line-2517"></a>  type_attr: "T"
<a name="line-2518"></a>}
<a name="line-2519"></a>output_arg {
<a name="line-2520"></a>  description: "1D backprop tensor for mean."
<a name="line-2521"></a>  name: "dm"
<a name="line-2522"></a>  type_attr: "T"
<a name="line-2523"></a>}
<a name="line-2524"></a>output_arg {
<a name="line-2525"></a>  description: "1D backprop tensor for variance."
<a name="line-2526"></a>  name: "dv"
<a name="line-2527"></a>  type_attr: "T"
<a name="line-2528"></a>}
<a name="line-2529"></a>output_arg {
<a name="line-2530"></a>  description: "1D backprop tensor for beta."
<a name="line-2531"></a>  name: "db"
<a name="line-2532"></a>  type_attr: "T"
<a name="line-2533"></a>}
<a name="line-2534"></a>output_arg {
<a name="line-2535"></a>  description: "1D backprop tensor for gamma."
<a name="line-2536"></a>  name: "dg"
<a name="line-2537"></a>  type_attr: "T"
<a name="line-2538"></a>}
<a name="line-2539"></a>-}</span>
<a name="line-2540"></a>
<a name="line-2541"></a><span class='hs-comment'>-- | </span>
<a name="line-2542"></a>
<a name="line-2543"></a><a name="batchFFT3D"></a><span class='hs-definition'>batchFFT3D</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-2544"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-2545"></a><span class='hs-definition'>batchFFT3D</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2546"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchFFT3D"</span><span class='hs-layout'>)</span>
<a name="line-2547"></a>        <span class='hs-varid'>input</span>
<a name="line-2548"></a><span class='hs-comment'>{-
<a name="line-2549"></a>input_arg { name: "input" type: DT_COMPLEX64 }
<a name="line-2550"></a>output_arg { name: "output" type: DT_COMPLEX64 }
<a name="line-2551"></a>-}</span>
<a name="line-2552"></a>
<a name="line-2553"></a><span class='hs-comment'>-- | </span>
<a name="line-2554"></a>
<a name="line-2555"></a><a name="batchIFFT2D"></a><span class='hs-definition'>batchIFFT2D</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-2556"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-2557"></a><span class='hs-definition'>batchIFFT2D</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2558"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchIFFT2D"</span><span class='hs-layout'>)</span>
<a name="line-2559"></a>        <span class='hs-varid'>input</span>
<a name="line-2560"></a><span class='hs-comment'>{-
<a name="line-2561"></a>input_arg { name: "input" type: DT_COMPLEX64 }
<a name="line-2562"></a>output_arg { name: "output" type: DT_COMPLEX64 }
<a name="line-2563"></a>-}</span>
<a name="line-2564"></a>
<a name="line-2565"></a><a name="avgPool"></a><span class='hs-comment'>-- | Performs average pooling on the input.</span>
<a name="line-2566"></a><span class='hs-comment'>--</span>
<a name="line-2567"></a><span class='hs-comment'>-- Each entry in `output` is the mean of the corresponding size `ksize`</span>
<a name="line-2568"></a><span class='hs-comment'>-- window in `value`.</span>
<a name="line-2569"></a><span class='hs-definition'>avgPool</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-2570"></a>                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2571"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-2572"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The average pooled output tensor.</span>
<a name="line-2573"></a><span class='hs-definition'>avgPool</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2574"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AvgPool"</span>
<a name="line-2575"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-2576"></a>        <span class='hs-varid'>value</span>
<a name="line-2577"></a><span class='hs-comment'>{-
<a name="line-2578"></a>attr {
<a name="line-2579"></a>  description: "The size of the sliding window for each dimension of `value`."
<a name="line-2580"></a>  has_minimum: true
<a name="line-2581"></a>  minimum: 4
<a name="line-2582"></a>  name: "ksize"
<a name="line-2583"></a>  type: "list(int)"
<a name="line-2584"></a>}
<a name="line-2585"></a>attr {
<a name="line-2586"></a>  description: "The stride of the sliding window for each dimension of `value`."
<a name="line-2587"></a>  has_minimum: true
<a name="line-2588"></a>  minimum: 4
<a name="line-2589"></a>  name: "strides"
<a name="line-2590"></a>  type: "list(int)"
<a name="line-2591"></a>}
<a name="line-2592"></a>attr {
<a name="line-2593"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-2594"></a>  description: "The type of padding algorithm to use."
<a name="line-2595"></a>  name: "padding"
<a name="line-2596"></a>  type: "string"
<a name="line-2597"></a>}
<a name="line-2598"></a>attr {
<a name="line-2599"></a>  allowed_values { list { s: "NHWC" s: "NCHW" } }
<a name="line-2600"></a>  default_value { s: "NHWC" }
<a name="line-2601"></a>  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
<a name="line-2602"></a>  name: "data_format"
<a name="line-2603"></a>  type: "string"
<a name="line-2604"></a>}
<a name="line-2605"></a>attr {
<a name="line-2606"></a>  allowed_values {
<a name="line-2607"></a>    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
<a name="line-2608"></a>  }
<a name="line-2609"></a>  name: "T"
<a name="line-2610"></a>  type: "type"
<a name="line-2611"></a>}
<a name="line-2612"></a>input_arg {
<a name="line-2613"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-2614"></a>  name: "value"
<a name="line-2615"></a>  type_attr: "T"
<a name="line-2616"></a>}
<a name="line-2617"></a>output_arg {
<a name="line-2618"></a>  description: "The average pooled output tensor."
<a name="line-2619"></a>  name: "output"
<a name="line-2620"></a>  type_attr: "T"
<a name="line-2621"></a>}
<a name="line-2622"></a>-}</span>
<a name="line-2623"></a>
<a name="line-2624"></a><span class='hs-comment'>-- | </span>
<a name="line-2625"></a>
<a name="line-2626"></a><a name="batchFFT2D"></a><span class='hs-definition'>batchFFT2D</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-2627"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-2628"></a><span class='hs-definition'>batchFFT2D</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2629"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchFFT2D"</span><span class='hs-layout'>)</span>
<a name="line-2630"></a>        <span class='hs-varid'>input</span>
<a name="line-2631"></a><span class='hs-comment'>{-
<a name="line-2632"></a>input_arg { name: "input" type: DT_COMPLEX64 }
<a name="line-2633"></a>output_arg { name: "output" type: DT_COMPLEX64 }
<a name="line-2634"></a>-}</span>
<a name="line-2635"></a>
<a name="line-2636"></a><span class='hs-comment'>-- | </span>
<a name="line-2637"></a>
<a name="line-2638"></a><a name="batchFFT"></a><span class='hs-definition'>batchFFT</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-2639"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-2640"></a><span class='hs-definition'>batchFFT</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2641"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchFFT"</span><span class='hs-layout'>)</span>
<a name="line-2642"></a>        <span class='hs-varid'>input</span>
<a name="line-2643"></a><span class='hs-comment'>{-
<a name="line-2644"></a>input_arg { name: "input" type: DT_COMPLEX64 }
<a name="line-2645"></a>output_arg { name: "output" type: DT_COMPLEX64 }
<a name="line-2646"></a>-}</span>
<a name="line-2647"></a>
<a name="line-2648"></a><a name="requantizationRange"></a><span class='hs-comment'>-- | Given a quantized tensor described by (input, input_min, input_max), outputs a</span>
<a name="line-2649"></a><span class='hs-comment'>--</span>
<a name="line-2650"></a><span class='hs-comment'>-- range that covers the actual values present in that tensor.  This op is</span>
<a name="line-2651"></a><span class='hs-comment'>-- typically used to produce the requested_output_min and requested_output_max for</span>
<a name="line-2652"></a><span class='hs-comment'>-- Requantize.</span>
<a name="line-2653"></a><span class='hs-definition'>requantizationRange</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tinput</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-2654"></a>                                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-2655"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2656"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-2657"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2658"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-2659"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __input_min__: The float value that the minimum quantized input value represents.</span>
<a name="line-2660"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __input_max__: The float value that the maximum quantized input value represents.</span>
<a name="line-2661"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-2662"></a>                       <span class='hs-comment'>-- ^ (__output_min__, __output_max__)</span>
<a name="line-2663"></a>                       <span class='hs-comment'>--</span>
<a name="line-2664"></a>                       <span class='hs-comment'>-- * __output_min__: The computed min output.</span>
<a name="line-2665"></a>                       <span class='hs-comment'>--</span>
<a name="line-2666"></a>                       <span class='hs-comment'>-- * __output_max__: the computed max output.</span>
<a name="line-2667"></a><span class='hs-definition'>requantizationRange</span> <span class='hs-varid'>input</span> <span class='hs-varid'>input_min</span> <span class='hs-varid'>input_max</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2668"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RequantizationRange"</span>
<a name="line-2669"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tinput"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-2670"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>input_min</span> <span class='hs-varid'>input_max</span>
<a name="line-2671"></a><span class='hs-comment'>{-
<a name="line-2672"></a>attr {
<a name="line-2673"></a>  allowed_values {
<a name="line-2674"></a>    list {
<a name="line-2675"></a>      type: DT_QINT8
<a name="line-2676"></a>      type: DT_QUINT8
<a name="line-2677"></a>      type: DT_QINT16
<a name="line-2678"></a>      type: DT_QUINT16
<a name="line-2679"></a>      type: DT_QINT32
<a name="line-2680"></a>    }
<a name="line-2681"></a>  }
<a name="line-2682"></a>  description: "The type of the input."
<a name="line-2683"></a>  name: "Tinput"
<a name="line-2684"></a>  type: "type"
<a name="line-2685"></a>}
<a name="line-2686"></a>input_arg { name: "input" type_attr: "Tinput" }
<a name="line-2687"></a>input_arg {
<a name="line-2688"></a>  description: "The float value that the minimum quantized input value represents."
<a name="line-2689"></a>  name: "input_min"
<a name="line-2690"></a>  type: DT_FLOAT
<a name="line-2691"></a>}
<a name="line-2692"></a>input_arg {
<a name="line-2693"></a>  description: "The float value that the maximum quantized input value represents."
<a name="line-2694"></a>  name: "input_max"
<a name="line-2695"></a>  type: DT_FLOAT
<a name="line-2696"></a>}
<a name="line-2697"></a>output_arg {
<a name="line-2698"></a>  description: "The computed min output."
<a name="line-2699"></a>  name: "output_min"
<a name="line-2700"></a>  type: DT_FLOAT
<a name="line-2701"></a>}
<a name="line-2702"></a>output_arg {
<a name="line-2703"></a>  description: "the computed max output."
<a name="line-2704"></a>  name: "output_max"
<a name="line-2705"></a>  type: DT_FLOAT
<a name="line-2706"></a>}
<a name="line-2707"></a>-}</span>
<a name="line-2708"></a>
<a name="line-2709"></a><a name="requantize"></a><span class='hs-comment'>-- | Convert the quantized 'input' tensor into a lower-precision 'output', using the</span>
<a name="line-2710"></a><span class='hs-comment'>--</span>
<a name="line-2711"></a><span class='hs-comment'>-- output range specified with 'requested_output_min' and 'requested_output_max'.</span>
<a name="line-2712"></a><span class='hs-comment'>-- </span>
<a name="line-2713"></a><span class='hs-comment'>-- [input_min, input_max] are scalar floats that specify the range for the float</span>
<a name="line-2714"></a><span class='hs-comment'>-- interpretation of the 'input' data. For example, if input_min is -1.0f and</span>
<a name="line-2715"></a><span class='hs-comment'>-- input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0</span>
<a name="line-2716"></a><span class='hs-comment'>-- value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.</span>
<a name="line-2717"></a><span class='hs-definition'>requantize</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>tinput</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-2718"></a>                                                       <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-2719"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2720"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-2721"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-2722"></a>                                                       <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-2723"></a>                                                       <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-2724"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2725"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-2726"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2727"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-2728"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __input_min__: The float value that the minimum quantized input value represents.</span>
<a name="line-2729"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __input_max__: The float value that the maximum quantized input value represents.</span>
<a name="line-2730"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __requested_output_min__: The float value that the minimum quantized output value represents.</span>
<a name="line-2731"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __requested_output_max__: The float value that the maximum quantized output value represents.</span>
<a name="line-2732"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-2733"></a>              <span class='hs-comment'>-- ^ (__output__, __output_min__, __output_max__)</span>
<a name="line-2734"></a>              <span class='hs-comment'>--</span>
<a name="line-2735"></a>              <span class='hs-comment'>-- * __output__</span>
<a name="line-2736"></a>              <span class='hs-comment'>--</span>
<a name="line-2737"></a>              <span class='hs-comment'>-- * __output_min__: The requested_output_min value is copied into this output.</span>
<a name="line-2738"></a>              <span class='hs-comment'>--</span>
<a name="line-2739"></a>              <span class='hs-comment'>-- * __output_max__: The requested_output_max value is copied into this output.</span>
<a name="line-2740"></a><span class='hs-definition'>requantize</span> <span class='hs-varid'>input</span> <span class='hs-varid'>input_min</span> <span class='hs-varid'>input_max</span> <span class='hs-varid'>requested_output_min</span>
<a name="line-2741"></a>           <span class='hs-varid'>requested_output_max</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2742"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Requantize"</span>
<a name="line-2743"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tinput"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>)</span>
<a name="line-2744"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-2745"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>input_min</span> <span class='hs-varid'>input_max</span> <span class='hs-varid'>requested_output_min</span> <span class='hs-varid'>requested_output_max</span>
<a name="line-2746"></a><span class='hs-comment'>{-
<a name="line-2747"></a>attr {
<a name="line-2748"></a>  allowed_values {
<a name="line-2749"></a>    list {
<a name="line-2750"></a>      type: DT_QINT8
<a name="line-2751"></a>      type: DT_QUINT8
<a name="line-2752"></a>      type: DT_QINT16
<a name="line-2753"></a>      type: DT_QUINT16
<a name="line-2754"></a>      type: DT_QINT32
<a name="line-2755"></a>    }
<a name="line-2756"></a>  }
<a name="line-2757"></a>  description: "The type of the input."
<a name="line-2758"></a>  name: "Tinput"
<a name="line-2759"></a>  type: "type"
<a name="line-2760"></a>}
<a name="line-2761"></a>attr {
<a name="line-2762"></a>  allowed_values {
<a name="line-2763"></a>    list {
<a name="line-2764"></a>      type: DT_QINT8
<a name="line-2765"></a>      type: DT_QUINT8
<a name="line-2766"></a>      type: DT_QINT16
<a name="line-2767"></a>      type: DT_QUINT16
<a name="line-2768"></a>      type: DT_QINT32
<a name="line-2769"></a>    }
<a name="line-2770"></a>  }
<a name="line-2771"></a>  description: "The type of the output. Should be a lower bit depth than Tinput."
<a name="line-2772"></a>  name: "out_type"
<a name="line-2773"></a>  type: "type"
<a name="line-2774"></a>}
<a name="line-2775"></a>input_arg { name: "input" type_attr: "Tinput" }
<a name="line-2776"></a>input_arg {
<a name="line-2777"></a>  description: "The float value that the minimum quantized input value represents."
<a name="line-2778"></a>  name: "input_min"
<a name="line-2779"></a>  type: DT_FLOAT
<a name="line-2780"></a>}
<a name="line-2781"></a>input_arg {
<a name="line-2782"></a>  description: "The float value that the maximum quantized input value represents."
<a name="line-2783"></a>  name: "input_max"
<a name="line-2784"></a>  type: DT_FLOAT
<a name="line-2785"></a>}
<a name="line-2786"></a>input_arg {
<a name="line-2787"></a>  description: "The float value that the minimum quantized output value represents."
<a name="line-2788"></a>  name: "requested_output_min"
<a name="line-2789"></a>  type: DT_FLOAT
<a name="line-2790"></a>}
<a name="line-2791"></a>input_arg {
<a name="line-2792"></a>  description: "The float value that the maximum quantized output value represents."
<a name="line-2793"></a>  name: "requested_output_max"
<a name="line-2794"></a>  type: DT_FLOAT
<a name="line-2795"></a>}
<a name="line-2796"></a>output_arg { name: "output" type_attr: "out_type" }
<a name="line-2797"></a>output_arg {
<a name="line-2798"></a>  description: "The requested_output_min value is copied into this output."
<a name="line-2799"></a>  name: "output_min"
<a name="line-2800"></a>  type: DT_FLOAT
<a name="line-2801"></a>}
<a name="line-2802"></a>output_arg {
<a name="line-2803"></a>  description: "The requested_output_max value is copied into this output."
<a name="line-2804"></a>  name: "output_max"
<a name="line-2805"></a>  type: DT_FLOAT
<a name="line-2806"></a>}
<a name="line-2807"></a>-}</span>
<a name="line-2808"></a>
<a name="line-2809"></a><a name="quantizeDownAndShrinkRange"></a><span class='hs-comment'>-- | Convert the quantized 'input' tensor into a lower-precision 'output', using the</span>
<a name="line-2810"></a><span class='hs-comment'>--</span>
<a name="line-2811"></a><span class='hs-comment'>-- actual distribution of the values to maximize the usage of the lower bit depth</span>
<a name="line-2812"></a><span class='hs-comment'>-- and adjusting the output min and max ranges accordingly.</span>
<a name="line-2813"></a><span class='hs-comment'>-- </span>
<a name="line-2814"></a><span class='hs-comment'>-- [input_min, input_max] are scalar floats that specify the range for the float</span>
<a name="line-2815"></a><span class='hs-comment'>-- interpretation of the 'input' data. For example, if input_min is -1.0f and</span>
<a name="line-2816"></a><span class='hs-comment'>-- input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0</span>
<a name="line-2817"></a><span class='hs-comment'>-- value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.</span>
<a name="line-2818"></a><span class='hs-comment'>-- </span>
<a name="line-2819"></a><span class='hs-comment'>-- This operator tries to squeeze as much precision as possible into an output with</span>
<a name="line-2820"></a><span class='hs-comment'>-- a lower bit depth by calculating the actual min and max values found in the</span>
<a name="line-2821"></a><span class='hs-comment'>-- data. For example, maybe that quint16 input has no values lower than 16,384 and</span>
<a name="line-2822"></a><span class='hs-comment'>-- none higher than 49,152. That means only half the range is actually needed, all</span>
<a name="line-2823"></a><span class='hs-comment'>-- the float interpretations are between -0.5f and 0.5f, so if we want to compress</span>
<a name="line-2824"></a><span class='hs-comment'>-- the data into a quint8 output, we can use that range rather than the theoretical</span>
<a name="line-2825"></a><span class='hs-comment'>-- -1.0f to 1.0f that is suggested by the input min and max.</span>
<a name="line-2826"></a><span class='hs-comment'>-- </span>
<a name="line-2827"></a><span class='hs-comment'>-- In practice, this is most useful for taking output from operations like</span>
<a name="line-2828"></a><span class='hs-comment'>-- QuantizedMatMul that can produce higher bit-depth outputs than their inputs and</span>
<a name="line-2829"></a><span class='hs-comment'>-- may have large potential output ranges, but in practice have a distribution of</span>
<a name="line-2830"></a><span class='hs-comment'>-- input values that only uses a small fraction of the possible range. By feeding</span>
<a name="line-2831"></a><span class='hs-comment'>-- that output into this operator, we can reduce it from 32 bits down to 8 with</span>
<a name="line-2832"></a><span class='hs-comment'>-- minimal loss of accuracy.</span>
<a name="line-2833"></a><span class='hs-definition'>quantizeDownAndShrinkRange</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tinput</span>
<a name="line-2834"></a>                              <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-2835"></a>                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-2836"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2837"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-2838"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-2839"></a>                                          <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-2840"></a>                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-2841"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2842"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-2843"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2844"></a>                              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-2845"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __input_min__: The float value that the minimum quantized input value represents.</span>
<a name="line-2846"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __input_max__: The float value that the maximum quantized input value represents.</span>
<a name="line-2847"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-2848"></a>                                  <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-2849"></a>                              <span class='hs-comment'>-- ^ (__output__, __output_min__, __output_max__)</span>
<a name="line-2850"></a>                              <span class='hs-comment'>--</span>
<a name="line-2851"></a>                              <span class='hs-comment'>-- * __output__</span>
<a name="line-2852"></a>                              <span class='hs-comment'>--</span>
<a name="line-2853"></a>                              <span class='hs-comment'>-- * __output_min__: The float value that the minimum quantized output value represents.</span>
<a name="line-2854"></a>                              <span class='hs-comment'>--</span>
<a name="line-2855"></a>                              <span class='hs-comment'>-- * __output_max__: The float value that the maximum quantized output value represents.</span>
<a name="line-2856"></a><span class='hs-definition'>quantizeDownAndShrinkRange</span> <span class='hs-varid'>input</span> <span class='hs-varid'>input_min</span> <span class='hs-varid'>input_max</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2857"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizeDownAndShrinkRange"</span>
<a name="line-2858"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tinput"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>)</span>
<a name="line-2859"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-2860"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>input_min</span> <span class='hs-varid'>input_max</span>
<a name="line-2861"></a><span class='hs-comment'>{-
<a name="line-2862"></a>attr {
<a name="line-2863"></a>  allowed_values {
<a name="line-2864"></a>    list {
<a name="line-2865"></a>      type: DT_QINT8
<a name="line-2866"></a>      type: DT_QUINT8
<a name="line-2867"></a>      type: DT_QINT16
<a name="line-2868"></a>      type: DT_QUINT16
<a name="line-2869"></a>      type: DT_QINT32
<a name="line-2870"></a>    }
<a name="line-2871"></a>  }
<a name="line-2872"></a>  description: "The type of the input."
<a name="line-2873"></a>  name: "Tinput"
<a name="line-2874"></a>  type: "type"
<a name="line-2875"></a>}
<a name="line-2876"></a>attr {
<a name="line-2877"></a>  allowed_values {
<a name="line-2878"></a>    list {
<a name="line-2879"></a>      type: DT_QINT8
<a name="line-2880"></a>      type: DT_QUINT8
<a name="line-2881"></a>      type: DT_QINT16
<a name="line-2882"></a>      type: DT_QUINT16
<a name="line-2883"></a>      type: DT_QINT32
<a name="line-2884"></a>    }
<a name="line-2885"></a>  }
<a name="line-2886"></a>  description: "The type of the output. Should be a lower bit depth than Tinput."
<a name="line-2887"></a>  name: "out_type"
<a name="line-2888"></a>  type: "type"
<a name="line-2889"></a>}
<a name="line-2890"></a>input_arg { name: "input" type_attr: "Tinput" }
<a name="line-2891"></a>input_arg {
<a name="line-2892"></a>  description: "The float value that the minimum quantized input value represents."
<a name="line-2893"></a>  name: "input_min"
<a name="line-2894"></a>  type: DT_FLOAT
<a name="line-2895"></a>}
<a name="line-2896"></a>input_arg {
<a name="line-2897"></a>  description: "The float value that the maximum quantized input value represents."
<a name="line-2898"></a>  name: "input_max"
<a name="line-2899"></a>  type: DT_FLOAT
<a name="line-2900"></a>}
<a name="line-2901"></a>output_arg { name: "output" type_attr: "out_type" }
<a name="line-2902"></a>output_arg {
<a name="line-2903"></a>  description: "The float value that the minimum quantized output value represents."
<a name="line-2904"></a>  name: "output_min"
<a name="line-2905"></a>  type: DT_FLOAT
<a name="line-2906"></a>}
<a name="line-2907"></a>output_arg {
<a name="line-2908"></a>  description: "The float value that the maximum quantized output value represents."
<a name="line-2909"></a>  name: "output_max"
<a name="line-2910"></a>  type: DT_FLOAT
<a name="line-2911"></a>}
<a name="line-2912"></a>-}</span>
<a name="line-2913"></a>
<a name="line-2914"></a><a name="quantizedMatMul"></a><span class='hs-comment'>-- | Perform a quantized matrix multiplication of  `a` by the matrix `b`.</span>
<a name="line-2915"></a><span class='hs-comment'>--</span>
<a name="line-2916"></a><span class='hs-comment'>-- The inputs must be two-dimensional matrices and the inner dimension of</span>
<a name="line-2917"></a><span class='hs-comment'>-- `a` (after being transposed if `transpose_a` is non-zero) must match the</span>
<a name="line-2918"></a><span class='hs-comment'>-- outer dimension of `b` (after being transposed if `transposed_b` is</span>
<a name="line-2919"></a><span class='hs-comment'>-- non-zero).</span>
<a name="line-2920"></a><span class='hs-definition'>quantizedMatMul</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t1</span> <span class='hs-varid'>t2</span> <span class='hs-varid'>toutput</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t1</span><span class='hs-layout'>,</span>
<a name="line-2921"></a>                                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-2922"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2923"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-2924"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t1</span><span class='hs-layout'>,</span>
<a name="line-2925"></a>                                                             <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t2</span><span class='hs-layout'>,</span>
<a name="line-2926"></a>                                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-2927"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2928"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-2929"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t2</span><span class='hs-layout'>,</span>
<a name="line-2930"></a>                                                             <span class='hs-conid'>TensorType</span> <span class='hs-varid'>toutput</span><span class='hs-layout'>,</span>
<a name="line-2931"></a>                                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-2932"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-2933"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-2934"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>toutput</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-2935"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t1</span> <span class='hs-comment'>-- ^ __a__: Must be a two-dimensional tensor.</span>
<a name="line-2936"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t2</span> <span class='hs-comment'>-- ^ __b__: Must be a two-dimensional tensor.</span>
<a name="line-2937"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_a__: The float value that the lowest quantized `a` value represents.</span>
<a name="line-2938"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_a__: The float value that the highest quantized `a` value represents.</span>
<a name="line-2939"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_b__: The float value that the lowest quantized `b` value represents.</span>
<a name="line-2940"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_b__: The float value that the highest quantized `b` value represents.</span>
<a name="line-2941"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>toutput</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-2942"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-2943"></a>                   <span class='hs-comment'>-- ^ (__out__, __min_out__, __max_out__)</span>
<a name="line-2944"></a>                   <span class='hs-comment'>--</span>
<a name="line-2945"></a>                   <span class='hs-comment'>-- * __out__</span>
<a name="line-2946"></a>                   <span class='hs-comment'>--</span>
<a name="line-2947"></a>                   <span class='hs-comment'>-- * __min_out__: The float value that the lowest quantized output value represents.</span>
<a name="line-2948"></a>                   <span class='hs-comment'>--</span>
<a name="line-2949"></a>                   <span class='hs-comment'>-- * __max_out__: The float value that the highest quantized output value represents.</span>
<a name="line-2950"></a><span class='hs-definition'>quantizedMatMul</span> <span class='hs-varid'>a</span> <span class='hs-varid'>b</span> <span class='hs-varid'>min_a</span> <span class='hs-varid'>max_a</span> <span class='hs-varid'>min_b</span> <span class='hs-varid'>max_b</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-2951"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedMatMul"</span>
<a name="line-2952"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T1"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t1</span><span class='hs-layout'>)</span>
<a name="line-2953"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T2"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t2</span><span class='hs-layout'>)</span>
<a name="line-2954"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Toutput"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>toutput</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-2955"></a>        <span class='hs-varid'>a</span> <span class='hs-varid'>b</span> <span class='hs-varid'>min_a</span> <span class='hs-varid'>max_a</span> <span class='hs-varid'>min_b</span> <span class='hs-varid'>max_b</span>
<a name="line-2956"></a><span class='hs-comment'>{-
<a name="line-2957"></a>attr {
<a name="line-2958"></a>  allowed_values {
<a name="line-2959"></a>    list {
<a name="line-2960"></a>      type: DT_QINT8
<a name="line-2961"></a>      type: DT_QUINT8
<a name="line-2962"></a>      type: DT_QINT16
<a name="line-2963"></a>      type: DT_QUINT16
<a name="line-2964"></a>      type: DT_QINT32
<a name="line-2965"></a>    }
<a name="line-2966"></a>  }
<a name="line-2967"></a>  name: "T1"
<a name="line-2968"></a>  type: "type"
<a name="line-2969"></a>}
<a name="line-2970"></a>attr {
<a name="line-2971"></a>  allowed_values {
<a name="line-2972"></a>    list {
<a name="line-2973"></a>      type: DT_QINT8
<a name="line-2974"></a>      type: DT_QUINT8
<a name="line-2975"></a>      type: DT_QINT16
<a name="line-2976"></a>      type: DT_QUINT16
<a name="line-2977"></a>      type: DT_QINT32
<a name="line-2978"></a>    }
<a name="line-2979"></a>  }
<a name="line-2980"></a>  name: "T2"
<a name="line-2981"></a>  type: "type"
<a name="line-2982"></a>}
<a name="line-2983"></a>attr {
<a name="line-2984"></a>  allowed_values {
<a name="line-2985"></a>    list {
<a name="line-2986"></a>      type: DT_QINT8
<a name="line-2987"></a>      type: DT_QUINT8
<a name="line-2988"></a>      type: DT_QINT16
<a name="line-2989"></a>      type: DT_QUINT16
<a name="line-2990"></a>      type: DT_QINT32
<a name="line-2991"></a>    }
<a name="line-2992"></a>  }
<a name="line-2993"></a>  default_value { type: DT_QINT32 }
<a name="line-2994"></a>  name: "Toutput"
<a name="line-2995"></a>  type: "type"
<a name="line-2996"></a>}
<a name="line-2997"></a>attr {
<a name="line-2998"></a>  default_value { b: false }
<a name="line-2999"></a>  description: "If true, `a` is transposed before multiplication."
<a name="line-3000"></a>  name: "transpose_a"
<a name="line-3001"></a>  type: "bool"
<a name="line-3002"></a>}
<a name="line-3003"></a>attr {
<a name="line-3004"></a>  default_value { b: false }
<a name="line-3005"></a>  description: "If true, `b` is transposed before multiplication."
<a name="line-3006"></a>  name: "transpose_b"
<a name="line-3007"></a>  type: "bool"
<a name="line-3008"></a>}
<a name="line-3009"></a>input_arg {
<a name="line-3010"></a>  description: "Must be a two-dimensional tensor."
<a name="line-3011"></a>  name: "a"
<a name="line-3012"></a>  type_attr: "T1"
<a name="line-3013"></a>}
<a name="line-3014"></a>input_arg {
<a name="line-3015"></a>  description: "Must be a two-dimensional tensor."
<a name="line-3016"></a>  name: "b"
<a name="line-3017"></a>  type_attr: "T2"
<a name="line-3018"></a>}
<a name="line-3019"></a>input_arg {
<a name="line-3020"></a>  description: "The float value that the lowest quantized `a` value represents."
<a name="line-3021"></a>  name: "min_a"
<a name="line-3022"></a>  type: DT_FLOAT
<a name="line-3023"></a>}
<a name="line-3024"></a>input_arg {
<a name="line-3025"></a>  description: "The float value that the highest quantized `a` value represents."
<a name="line-3026"></a>  name: "max_a"
<a name="line-3027"></a>  type: DT_FLOAT
<a name="line-3028"></a>}
<a name="line-3029"></a>input_arg {
<a name="line-3030"></a>  description: "The float value that the lowest quantized `b` value represents."
<a name="line-3031"></a>  name: "min_b"
<a name="line-3032"></a>  type: DT_FLOAT
<a name="line-3033"></a>}
<a name="line-3034"></a>input_arg {
<a name="line-3035"></a>  description: "The float value that the highest quantized `b` value represents."
<a name="line-3036"></a>  name: "max_b"
<a name="line-3037"></a>  type: DT_FLOAT
<a name="line-3038"></a>}
<a name="line-3039"></a>output_arg { name: "out" type_attr: "Toutput" }
<a name="line-3040"></a>output_arg {
<a name="line-3041"></a>  description: "The float value that the lowest quantized output value represents."
<a name="line-3042"></a>  name: "min_out"
<a name="line-3043"></a>  type: DT_FLOAT
<a name="line-3044"></a>}
<a name="line-3045"></a>output_arg {
<a name="line-3046"></a>  description: "The float value that the highest quantized output value represents."
<a name="line-3047"></a>  name: "max_out"
<a name="line-3048"></a>  type: DT_FLOAT
<a name="line-3049"></a>}
<a name="line-3050"></a>-}</span>
<a name="line-3051"></a>
<a name="line-3052"></a><a name="cumprod"></a><span class='hs-comment'>-- | Compute the cumulative product of the tensor `x` along `axis`.</span>
<a name="line-3053"></a><span class='hs-comment'>--</span>
<a name="line-3054"></a><span class='hs-comment'>-- By default, this op performs an inclusive cumprod, which means that the first</span>
<a name="line-3055"></a><span class='hs-comment'>-- element of the input is identical to the first element of the output:</span>
<a name="line-3056"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-3057"></a><span class='hs-comment'>-- tf.cumprod([a, b, c]) ==&gt; [a, a * b, a * b * c]</span>
<a name="line-3058"></a><span class='hs-comment'>-- ```</span>
<a name="line-3059"></a><span class='hs-comment'>-- </span>
<a name="line-3060"></a><span class='hs-comment'>-- By setting the `exclusive` kwarg to `True`, an exclusive cumprod is</span>
<a name="line-3061"></a><span class='hs-comment'>-- performed instead:</span>
<a name="line-3062"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-3063"></a><span class='hs-comment'>-- tf.cumprod([a, b, c], exclusive=True) ==&gt; [0, a, a * b]</span>
<a name="line-3064"></a><span class='hs-comment'>-- ```</span>
<a name="line-3065"></a><span class='hs-comment'>-- </span>
<a name="line-3066"></a><span class='hs-comment'>-- By setting the `reverse` kwarg to `True`, the cumprod is performed in the</span>
<a name="line-3067"></a><span class='hs-comment'>-- opposite direction:</span>
<a name="line-3068"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-3069"></a><span class='hs-comment'>-- tf.cumprod([a, b, c], reverse=True) ==&gt; [a * b * c, b * c, c]</span>
<a name="line-3070"></a><span class='hs-comment'>-- ```</span>
<a name="line-3071"></a><span class='hs-comment'>-- This is more efficient than using separate `tf.reverse` ops.</span>
<a name="line-3072"></a><span class='hs-comment'>-- </span>
<a name="line-3073"></a><span class='hs-comment'>-- The `reverse` and `exclusive` kwargs can also be combined:</span>
<a name="line-3074"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-3075"></a><span class='hs-comment'>-- tf.cumprod([a, b, c], exclusive=True, reverse=True) ==&gt; [b * c, c, 0]</span>
<a name="line-3076"></a><span class='hs-comment'>-- ```</span>
<a name="line-3077"></a><span class='hs-definition'>cumprod</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3078"></a>                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3079"></a>                                          <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3080"></a>                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3081"></a>                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-3082"></a>                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-3083"></a>                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-3084"></a>                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3085"></a>                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3086"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-3087"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __axis__</span>
<a name="line-3088"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out__</span>
<a name="line-3089"></a><span class='hs-definition'>cumprod</span> <span class='hs-varid'>x</span> <span class='hs-varid'>axis</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3090"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Cumprod"</span>
<a name="line-3091"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-3092"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3093"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>axis</span>
<a name="line-3094"></a><span class='hs-comment'>{-
<a name="line-3095"></a>attr { default_value { b: false } name: "exclusive" type: "bool" }
<a name="line-3096"></a>attr { default_value { b: false } name: "reverse" type: "bool" }
<a name="line-3097"></a>attr {
<a name="line-3098"></a>  allowed_values {
<a name="line-3099"></a>    list {
<a name="line-3100"></a>      type: DT_FLOAT
<a name="line-3101"></a>      type: DT_DOUBLE
<a name="line-3102"></a>      type: DT_INT64
<a name="line-3103"></a>      type: DT_INT32
<a name="line-3104"></a>      type: DT_UINT8
<a name="line-3105"></a>      type: DT_UINT16
<a name="line-3106"></a>      type: DT_INT16
<a name="line-3107"></a>      type: DT_INT8
<a name="line-3108"></a>      type: DT_COMPLEX64
<a name="line-3109"></a>      type: DT_COMPLEX128
<a name="line-3110"></a>      type: DT_QINT8
<a name="line-3111"></a>      type: DT_QUINT8
<a name="line-3112"></a>      type: DT_QINT32
<a name="line-3113"></a>      type: DT_HALF
<a name="line-3114"></a>    }
<a name="line-3115"></a>  }
<a name="line-3116"></a>  name: "T"
<a name="line-3117"></a>  type: "type"
<a name="line-3118"></a>}
<a name="line-3119"></a>attr {
<a name="line-3120"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-3121"></a>  default_value { type: DT_INT32 }
<a name="line-3122"></a>  name: "Tidx"
<a name="line-3123"></a>  type: "type"
<a name="line-3124"></a>}
<a name="line-3125"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-3126"></a>input_arg { name: "axis" type_attr: "Tidx" }
<a name="line-3127"></a>output_arg { name: "out" type_attr: "T" }
<a name="line-3128"></a>-}</span>
<a name="line-3129"></a>
<a name="line-3130"></a><a name="cumsum"></a><span class='hs-comment'>-- | Compute the cumulative sum of the tensor `x` along `axis`.</span>
<a name="line-3131"></a><span class='hs-comment'>--</span>
<a name="line-3132"></a><span class='hs-comment'>-- By default, this op performs an inclusive cumsum, which means that the first</span>
<a name="line-3133"></a><span class='hs-comment'>-- element of the input is identical to the first element of the output:</span>
<a name="line-3134"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-3135"></a><span class='hs-comment'>-- tf.cumsum([a, b, c]) ==&gt; [a, a + b, a + b + c]</span>
<a name="line-3136"></a><span class='hs-comment'>-- ```</span>
<a name="line-3137"></a><span class='hs-comment'>-- </span>
<a name="line-3138"></a><span class='hs-comment'>-- By setting the `exclusive` kwarg to `True`, an exclusive cumsum is</span>
<a name="line-3139"></a><span class='hs-comment'>-- performed instead:</span>
<a name="line-3140"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-3141"></a><span class='hs-comment'>-- tf.cumsum([a, b, c], exclusive=True) ==&gt; [0, a, a + b]</span>
<a name="line-3142"></a><span class='hs-comment'>-- ```</span>
<a name="line-3143"></a><span class='hs-comment'>-- </span>
<a name="line-3144"></a><span class='hs-comment'>-- By setting the `reverse` kwarg to `True`, the cumsum is performed in the</span>
<a name="line-3145"></a><span class='hs-comment'>-- opposite direction:</span>
<a name="line-3146"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-3147"></a><span class='hs-comment'>-- tf.cumsum([a, b, c], reverse=True) ==&gt; [a + b + c, b + c, c]</span>
<a name="line-3148"></a><span class='hs-comment'>-- ```</span>
<a name="line-3149"></a><span class='hs-comment'>-- This is more efficient than using separate `tf.reverse` ops.</span>
<a name="line-3150"></a><span class='hs-comment'>-- </span>
<a name="line-3151"></a><span class='hs-comment'>-- The `reverse` and `exclusive` kwargs can also be combined:</span>
<a name="line-3152"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-3153"></a><span class='hs-comment'>-- tf.cumsum([a, b, c], exclusive=True, reverse=True) ==&gt; [b + c, c, 0]</span>
<a name="line-3154"></a><span class='hs-comment'>-- ```</span>
<a name="line-3155"></a><span class='hs-definition'>cumsum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3156"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3157"></a>                                         <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3158"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3159"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-3160"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-3161"></a>                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-3162"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3163"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3164"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-3165"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __axis__</span>
<a name="line-3166"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out__</span>
<a name="line-3167"></a><span class='hs-definition'>cumsum</span> <span class='hs-varid'>x</span> <span class='hs-varid'>axis</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3168"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Cumsum"</span>
<a name="line-3169"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-3170"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3171"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>axis</span>
<a name="line-3172"></a><span class='hs-comment'>{-
<a name="line-3173"></a>attr { default_value { b: false } name: "exclusive" type: "bool" }
<a name="line-3174"></a>attr { default_value { b: false } name: "reverse" type: "bool" }
<a name="line-3175"></a>attr {
<a name="line-3176"></a>  allowed_values {
<a name="line-3177"></a>    list {
<a name="line-3178"></a>      type: DT_FLOAT
<a name="line-3179"></a>      type: DT_DOUBLE
<a name="line-3180"></a>      type: DT_INT64
<a name="line-3181"></a>      type: DT_INT32
<a name="line-3182"></a>      type: DT_UINT8
<a name="line-3183"></a>      type: DT_UINT16
<a name="line-3184"></a>      type: DT_INT16
<a name="line-3185"></a>      type: DT_INT8
<a name="line-3186"></a>      type: DT_COMPLEX64
<a name="line-3187"></a>      type: DT_COMPLEX128
<a name="line-3188"></a>      type: DT_QINT8
<a name="line-3189"></a>      type: DT_QUINT8
<a name="line-3190"></a>      type: DT_QINT32
<a name="line-3191"></a>      type: DT_HALF
<a name="line-3192"></a>    }
<a name="line-3193"></a>  }
<a name="line-3194"></a>  name: "T"
<a name="line-3195"></a>  type: "type"
<a name="line-3196"></a>}
<a name="line-3197"></a>attr {
<a name="line-3198"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-3199"></a>  default_value { type: DT_INT32 }
<a name="line-3200"></a>  name: "Tidx"
<a name="line-3201"></a>  type: "type"
<a name="line-3202"></a>}
<a name="line-3203"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-3204"></a>input_arg { name: "axis" type_attr: "Tidx" }
<a name="line-3205"></a>output_arg { name: "out" type_attr: "T" }
<a name="line-3206"></a>-}</span>
<a name="line-3207"></a>
<a name="line-3208"></a><a name="cross"></a><span class='hs-comment'>-- | Compute the pairwise cross product.</span>
<a name="line-3209"></a><span class='hs-comment'>--</span>
<a name="line-3210"></a><span class='hs-comment'>-- `a` and `b` must be the same shape; they can either be simple 3-element vectors,</span>
<a name="line-3211"></a><span class='hs-comment'>-- or any shape where the innermost dimension is 3. In the latter case, each pair</span>
<a name="line-3212"></a><span class='hs-comment'>-- of corresponding 3-element vectors is cross-multiplied independently.</span>
<a name="line-3213"></a><span class='hs-definition'>cross</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3214"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-3215"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-3216"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-3217"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3218"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a__: A tensor containing 3-element vectors.</span>
<a name="line-3219"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __b__: Another tensor, of same type and shape as `a`.</span>
<a name="line-3220"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __product__: Pairwise cross product of the vectors in `a` and `b`.</span>
<a name="line-3221"></a><span class='hs-definition'>cross</span> <span class='hs-varid'>a</span> <span class='hs-varid'>b</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3222"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Cross"</span>
<a name="line-3223"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3224"></a>        <span class='hs-varid'>a</span> <span class='hs-varid'>b</span>
<a name="line-3225"></a><span class='hs-comment'>{-
<a name="line-3226"></a>attr {
<a name="line-3227"></a>  allowed_values {
<a name="line-3228"></a>    list {
<a name="line-3229"></a>      type: DT_FLOAT
<a name="line-3230"></a>      type: DT_DOUBLE
<a name="line-3231"></a>      type: DT_INT32
<a name="line-3232"></a>      type: DT_INT64
<a name="line-3233"></a>      type: DT_UINT8
<a name="line-3234"></a>      type: DT_INT16
<a name="line-3235"></a>      type: DT_INT8
<a name="line-3236"></a>      type: DT_UINT16
<a name="line-3237"></a>      type: DT_HALF
<a name="line-3238"></a>    }
<a name="line-3239"></a>  }
<a name="line-3240"></a>  name: "T"
<a name="line-3241"></a>  type: "type"
<a name="line-3242"></a>}
<a name="line-3243"></a>input_arg {
<a name="line-3244"></a>  description: "A tensor containing 3-element vectors."
<a name="line-3245"></a>  name: "a"
<a name="line-3246"></a>  type_attr: "T"
<a name="line-3247"></a>}
<a name="line-3248"></a>input_arg {
<a name="line-3249"></a>  description: "Another tensor, of same type and shape as `a`."
<a name="line-3250"></a>  name: "b"
<a name="line-3251"></a>  type_attr: "T"
<a name="line-3252"></a>}
<a name="line-3253"></a>output_arg {
<a name="line-3254"></a>  description: "Pairwise cross product of the vectors in `a` and `b`."
<a name="line-3255"></a>  name: "product"
<a name="line-3256"></a>  type_attr: "T"
<a name="line-3257"></a>}
<a name="line-3258"></a>-}</span>
<a name="line-3259"></a>
<a name="line-3260"></a><a name="iFFT3D"></a><span class='hs-comment'>-- | Compute the inverse 3-dimensional discrete Fourier Transform over the inner-most</span>
<a name="line-3261"></a><span class='hs-comment'>--</span>
<a name="line-3262"></a><span class='hs-comment'>-- 3 dimensions of `input`.</span>
<a name="line-3263"></a><span class='hs-definition'>iFFT3D</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__: A complex64 tensor.</span>
<a name="line-3264"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3</span>
<a name="line-3265"></a>          <span class='hs-comment'>--   dimensions of `input` are replaced with their inverse 3D Fourier Transform.</span>
<a name="line-3266"></a>          <span class='hs-comment'>-- </span>
<a name="line-3267"></a>          <span class='hs-comment'>-- @compatibility(numpy)</span>
<a name="line-3268"></a>          <span class='hs-comment'>-- Equivalent to np.fft3</span>
<a name="line-3269"></a>          <span class='hs-comment'>-- @end_compatibility</span>
<a name="line-3270"></a><span class='hs-definition'>iFFT3D</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3271"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"IFFT3D"</span><span class='hs-layout'>)</span>
<a name="line-3272"></a>        <span class='hs-varid'>input</span>
<a name="line-3273"></a><span class='hs-comment'>{-
<a name="line-3274"></a>input_arg {
<a name="line-3275"></a>  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
<a name="line-3276"></a>}
<a name="line-3277"></a>output_arg {
<a name="line-3278"></a>  description: "A complex64 tensor of the same shape as `input`. The inner-most 3\n  dimensions of `input` are replaced with their inverse 3D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft3\n@end_compatibility"
<a name="line-3279"></a>  name: "output"
<a name="line-3280"></a>  type: DT_COMPLEX64
<a name="line-3281"></a>}
<a name="line-3282"></a>-}</span>
<a name="line-3283"></a>
<a name="line-3284"></a><a name="fFT3D"></a><span class='hs-comment'>-- | Compute the 3-dimensional discrete Fourier Transform over the inner-most 3</span>
<a name="line-3285"></a><span class='hs-comment'>--</span>
<a name="line-3286"></a><span class='hs-comment'>-- dimensions of `input`.</span>
<a name="line-3287"></a><span class='hs-definition'>fFT3D</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__: A complex64 tensor.</span>
<a name="line-3288"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3</span>
<a name="line-3289"></a>         <span class='hs-comment'>--   dimensions of `input` are replaced with their 3D Fourier Transform.</span>
<a name="line-3290"></a>         <span class='hs-comment'>-- </span>
<a name="line-3291"></a>         <span class='hs-comment'>-- @compatibility(numpy)</span>
<a name="line-3292"></a>         <span class='hs-comment'>-- Equivalent to np.fft3</span>
<a name="line-3293"></a>         <span class='hs-comment'>-- @end_compatibility</span>
<a name="line-3294"></a><span class='hs-definition'>fFT3D</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3295"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FFT3D"</span><span class='hs-layout'>)</span>
<a name="line-3296"></a>        <span class='hs-varid'>input</span>
<a name="line-3297"></a><span class='hs-comment'>{-
<a name="line-3298"></a>input_arg {
<a name="line-3299"></a>  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
<a name="line-3300"></a>}
<a name="line-3301"></a>output_arg {
<a name="line-3302"></a>  description: "A complex64 tensor of the same shape as `input`. The inner-most 3\n  dimensions of `input` are replaced with their 3D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft3\n@end_compatibility"
<a name="line-3303"></a>  name: "output"
<a name="line-3304"></a>  type: DT_COMPLEX64
<a name="line-3305"></a>}
<a name="line-3306"></a>-}</span>
<a name="line-3307"></a>
<a name="line-3308"></a><span class='hs-comment'>-- | Computes gradients of the maxpooling function.</span>
<a name="line-3309"></a>
<a name="line-3310"></a><a name="maxPoolGradWithArgmax"></a><span class='hs-definition'>maxPoolGradWithArgmax</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>targmax</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>targmax</span><span class='hs-layout'>,</span>
<a name="line-3311"></a>                                                      <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3312"></a>                                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>targmax</span><span class='hs-layout'>,</span>
<a name="line-3313"></a>                                                      <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3314"></a>                                                      <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-3315"></a>                                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3316"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The original input.</span>
<a name="line-3317"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the</span>
<a name="line-3318"></a>                                        <span class='hs-comment'>-- output of `max_pool`.</span>
<a name="line-3319"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>targmax</span> <span class='hs-comment'>-- ^ __argmax__: The indices of the maximum values chosen for each output of `max_pool`.</span>
<a name="line-3320"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Gradients w.r.t. the input of `max_pool`.</span>
<a name="line-3321"></a><span class='hs-definition'>maxPoolGradWithArgmax</span> <span class='hs-varid'>input</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>argmax</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3322"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MaxPoolGradWithArgmax"</span>
<a name="line-3323"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Targmax"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>targmax</span><span class='hs-layout'>)</span>
<a name="line-3324"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3325"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>argmax</span>
<a name="line-3326"></a><span class='hs-comment'>{-
<a name="line-3327"></a>attr {
<a name="line-3328"></a>  description: "The size of the window for each dimension of the input tensor."
<a name="line-3329"></a>  has_minimum: true
<a name="line-3330"></a>  minimum: 4
<a name="line-3331"></a>  name: "ksize"
<a name="line-3332"></a>  type: "list(int)"
<a name="line-3333"></a>}
<a name="line-3334"></a>attr {
<a name="line-3335"></a>  description: "The stride of the sliding window for each dimension of the\ninput tensor."
<a name="line-3336"></a>  has_minimum: true
<a name="line-3337"></a>  minimum: 4
<a name="line-3338"></a>  name: "strides"
<a name="line-3339"></a>  type: "list(int)"
<a name="line-3340"></a>}
<a name="line-3341"></a>attr {
<a name="line-3342"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-3343"></a>  description: "The type of padding algorithm to use."
<a name="line-3344"></a>  name: "padding"
<a name="line-3345"></a>  type: "string"
<a name="line-3346"></a>}
<a name="line-3347"></a>attr {
<a name="line-3348"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-3349"></a>  name: "Targmax"
<a name="line-3350"></a>  type: "type"
<a name="line-3351"></a>}
<a name="line-3352"></a>attr {
<a name="line-3353"></a>  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
<a name="line-3354"></a>  default_value { type: DT_FLOAT }
<a name="line-3355"></a>  name: "T"
<a name="line-3356"></a>  type: "type"
<a name="line-3357"></a>}
<a name="line-3358"></a>input_arg {
<a name="line-3359"></a>  description: "The original input." name: "input" type_attr: "T"
<a name="line-3360"></a>}
<a name="line-3361"></a>input_arg {
<a name="line-3362"></a>  description: "4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the\noutput of `max_pool`."
<a name="line-3363"></a>  name: "grad"
<a name="line-3364"></a>  type_attr: "T"
<a name="line-3365"></a>}
<a name="line-3366"></a>input_arg {
<a name="line-3367"></a>  description: "The indices of the maximum values chosen for each output of `max_pool`."
<a name="line-3368"></a>  name: "argmax"
<a name="line-3369"></a>  type_attr: "Targmax"
<a name="line-3370"></a>}
<a name="line-3371"></a>output_arg {
<a name="line-3372"></a>  description: "Gradients w.r.t. the input of `max_pool`."
<a name="line-3373"></a>  name: "output"
<a name="line-3374"></a>  type_attr: "T"
<a name="line-3375"></a>}
<a name="line-3376"></a>-}</span>
<a name="line-3377"></a>
<a name="line-3378"></a><a name="fFT2D"></a><span class='hs-comment'>-- | Compute the 2-dimensional discrete Fourier Transform over the inner-most</span>
<a name="line-3379"></a><span class='hs-comment'>--</span>
<a name="line-3380"></a><span class='hs-comment'>-- 2 dimensions of `input`.</span>
<a name="line-3381"></a><span class='hs-definition'>fFT2D</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__: A complex64 tensor.</span>
<a name="line-3382"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2</span>
<a name="line-3383"></a>         <span class='hs-comment'>--   dimensions of `input` are replaced with their 2D Fourier Transform.</span>
<a name="line-3384"></a>         <span class='hs-comment'>-- </span>
<a name="line-3385"></a>         <span class='hs-comment'>-- @compatibility(numpy)</span>
<a name="line-3386"></a>         <span class='hs-comment'>-- Equivalent to np.fft2</span>
<a name="line-3387"></a>         <span class='hs-comment'>-- @end_compatibility</span>
<a name="line-3388"></a><span class='hs-definition'>fFT2D</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3389"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FFT2D"</span><span class='hs-layout'>)</span>
<a name="line-3390"></a>        <span class='hs-varid'>input</span>
<a name="line-3391"></a><span class='hs-comment'>{-
<a name="line-3392"></a>input_arg {
<a name="line-3393"></a>  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
<a name="line-3394"></a>}
<a name="line-3395"></a>output_arg {
<a name="line-3396"></a>  description: "A complex64 tensor of the same shape as `input`. The inner-most 2\n  dimensions of `input` are replaced with their 2D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft2\n@end_compatibility"
<a name="line-3397"></a>  name: "output"
<a name="line-3398"></a>  type: DT_COMPLEX64
<a name="line-3399"></a>}
<a name="line-3400"></a>-}</span>
<a name="line-3401"></a>
<a name="line-3402"></a><a name="iFFT"></a><span class='hs-comment'>-- | Compute the inverse 1-dimensional discrete Fourier Transform over the inner-most</span>
<a name="line-3403"></a><span class='hs-comment'>--</span>
<a name="line-3404"></a><span class='hs-comment'>-- dimension of `input`.</span>
<a name="line-3405"></a><span class='hs-definition'>iFFT</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__: A complex64 tensor.</span>
<a name="line-3406"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most</span>
<a name="line-3407"></a>        <span class='hs-comment'>-- dimension of `input` is replaced with its inverse 1D Fourier Transform.</span>
<a name="line-3408"></a><span class='hs-definition'>iFFT</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3409"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"IFFT"</span><span class='hs-layout'>)</span>
<a name="line-3410"></a>        <span class='hs-varid'>input</span>
<a name="line-3411"></a><span class='hs-comment'>{-
<a name="line-3412"></a>input_arg {
<a name="line-3413"></a>  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
<a name="line-3414"></a>}
<a name="line-3415"></a>output_arg {
<a name="line-3416"></a>  description: "A complex64 tensor of the same shape as `input`. The inner-most\ndimension of `input` is replaced with its inverse 1D Fourier Transform."
<a name="line-3417"></a>  name: "output"
<a name="line-3418"></a>  type: DT_COMPLEX64
<a name="line-3419"></a>}
<a name="line-3420"></a>-}</span>
<a name="line-3421"></a>
<a name="line-3422"></a><a name="fFT"></a><span class='hs-comment'>-- | Compute the 1-dimensional discrete Fourier Transform over the inner-most</span>
<a name="line-3423"></a><span class='hs-comment'>--</span>
<a name="line-3424"></a><span class='hs-comment'>-- dimension of `input`.</span>
<a name="line-3425"></a><span class='hs-definition'>fFT</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__: A complex64 tensor.</span>
<a name="line-3426"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most</span>
<a name="line-3427"></a>       <span class='hs-comment'>-- dimension of `input` is replaced with its 1D Fourier Transform.</span>
<a name="line-3428"></a><span class='hs-definition'>fFT</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3429"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FFT"</span><span class='hs-layout'>)</span>
<a name="line-3430"></a>        <span class='hs-varid'>input</span>
<a name="line-3431"></a><span class='hs-comment'>{-
<a name="line-3432"></a>input_arg {
<a name="line-3433"></a>  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
<a name="line-3434"></a>}
<a name="line-3435"></a>output_arg {
<a name="line-3436"></a>  description: "A complex64 tensor of the same shape as `input`. The inner-most\ndimension of `input` is replaced with its 1D Fourier Transform."
<a name="line-3437"></a>  name: "output"
<a name="line-3438"></a>  type: DT_COMPLEX64
<a name="line-3439"></a>}
<a name="line-3440"></a>-}</span>
<a name="line-3441"></a>
<a name="line-3442"></a><a name="conj"></a><span class='hs-comment'>-- | Returns the complex conjugate of a complex number.</span>
<a name="line-3443"></a><span class='hs-comment'>--</span>
<a name="line-3444"></a><span class='hs-comment'>-- Given a tensor `input` of complex numbers, this operation returns a tensor of</span>
<a name="line-3445"></a><span class='hs-comment'>-- complex numbers that are the complex conjugate of each element in `input`. The</span>
<a name="line-3446"></a><span class='hs-comment'>-- complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the</span>
<a name="line-3447"></a><span class='hs-comment'>-- real part and *b* is the imaginary part.</span>
<a name="line-3448"></a><span class='hs-comment'>-- </span>
<a name="line-3449"></a><span class='hs-comment'>-- The complex conjugate returned by this operation is of the form \\(a - bj\\).</span>
<a name="line-3450"></a><span class='hs-comment'>-- </span>
<a name="line-3451"></a><span class='hs-comment'>-- For example:</span>
<a name="line-3452"></a><span class='hs-comment'>-- </span>
<a name="line-3453"></a><span class='hs-comment'>-- ```</span>
<a name="line-3454"></a><span class='hs-comment'>-- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]</span>
<a name="line-3455"></a><span class='hs-comment'>-- tf.conj(input) ==&gt; [-2.25 - 4.75j, 3.25 - 5.75j]</span>
<a name="line-3456"></a><span class='hs-comment'>-- ```</span>
<a name="line-3457"></a><span class='hs-definition'>conj</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3458"></a>                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3459"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-3460"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-3461"></a><span class='hs-definition'>conj</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3462"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Conj"</span>
<a name="line-3463"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3464"></a>        <span class='hs-varid'>input</span>
<a name="line-3465"></a><span class='hs-comment'>{-
<a name="line-3466"></a>attr {
<a name="line-3467"></a>  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
<a name="line-3468"></a>  default_value { type: DT_COMPLEX64 }
<a name="line-3469"></a>  name: "T"
<a name="line-3470"></a>  type: "type"
<a name="line-3471"></a>}
<a name="line-3472"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-3473"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-3474"></a>-}</span>
<a name="line-3475"></a>
<a name="line-3476"></a><a name="real"></a><span class='hs-comment'>-- | Returns the real part of a complex number.</span>
<a name="line-3477"></a><span class='hs-comment'>--</span>
<a name="line-3478"></a><span class='hs-comment'>-- Given a tensor `input` of complex numbers, this operation returns a tensor of</span>
<a name="line-3479"></a><span class='hs-comment'>-- type `float` that is the real part of each element in `input`. All elements in</span>
<a name="line-3480"></a><span class='hs-comment'>-- `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real</span>
<a name="line-3481"></a><span class='hs-comment'>--  part returned by this operation and *b* is the imaginary part.</span>
<a name="line-3482"></a><span class='hs-comment'>-- </span>
<a name="line-3483"></a><span class='hs-comment'>-- For example:</span>
<a name="line-3484"></a><span class='hs-comment'>-- </span>
<a name="line-3485"></a><span class='hs-comment'>-- ```</span>
<a name="line-3486"></a><span class='hs-comment'>-- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]</span>
<a name="line-3487"></a><span class='hs-comment'>-- tf.real(input) ==&gt; [-2.25, 3.25]</span>
<a name="line-3488"></a><span class='hs-comment'>-- ```</span>
<a name="line-3489"></a><span class='hs-definition'>real</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tout</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3490"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3491"></a>                            <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tout</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3492"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-3493"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tout</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-3494"></a><span class='hs-definition'>real</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3495"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Real"</span>
<a name="line-3496"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-3497"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tout"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3498"></a>        <span class='hs-varid'>input</span>
<a name="line-3499"></a><span class='hs-comment'>{-
<a name="line-3500"></a>attr {
<a name="line-3501"></a>  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
<a name="line-3502"></a>  default_value { type: DT_COMPLEX64 }
<a name="line-3503"></a>  name: "T"
<a name="line-3504"></a>  type: "type"
<a name="line-3505"></a>}
<a name="line-3506"></a>attr {
<a name="line-3507"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-3508"></a>  default_value { type: DT_FLOAT }
<a name="line-3509"></a>  name: "Tout"
<a name="line-3510"></a>  type: "type"
<a name="line-3511"></a>}
<a name="line-3512"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-3513"></a>output_arg { name: "output" type_attr: "Tout" }
<a name="line-3514"></a>-}</span>
<a name="line-3515"></a>
<a name="line-3516"></a><a name="complex"></a><span class='hs-comment'>-- | Converts two real numbers to a complex number.</span>
<a name="line-3517"></a><span class='hs-comment'>--</span>
<a name="line-3518"></a><span class='hs-comment'>-- Given a tensor `real` representing the real part of a complex number, and a</span>
<a name="line-3519"></a><span class='hs-comment'>-- tensor `imag` representing the imaginary part of a complex number, this</span>
<a name="line-3520"></a><span class='hs-comment'>-- operation returns complex numbers elementwise of the form \\(a + bj\\), where</span>
<a name="line-3521"></a><span class='hs-comment'>-- *a* represents the `real` part and *b* represents the `imag` part.</span>
<a name="line-3522"></a><span class='hs-comment'>-- </span>
<a name="line-3523"></a><span class='hs-comment'>-- The input tensors `real` and `imag` must have the same shape.</span>
<a name="line-3524"></a><span class='hs-comment'>-- </span>
<a name="line-3525"></a><span class='hs-comment'>-- For example:</span>
<a name="line-3526"></a><span class='hs-comment'>-- </span>
<a name="line-3527"></a><span class='hs-comment'>-- ```</span>
<a name="line-3528"></a><span class='hs-comment'>-- # tensor 'real' is [2.25, 3.25]</span>
<a name="line-3529"></a><span class='hs-comment'>-- # tensor `imag` is [4.75, 5.75]</span>
<a name="line-3530"></a><span class='hs-comment'>-- tf.complex(real, imag) ==&gt; [[2.25 + 4.75j], [3.25 + 5.75j]]</span>
<a name="line-3531"></a><span class='hs-comment'>-- ```</span>
<a name="line-3532"></a><span class='hs-definition'>complex</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tout</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3533"></a>                                  <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tout</span><span class='hs-layout'>,</span>
<a name="line-3534"></a>                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3535"></a>                                          <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3536"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __real__</span>
<a name="line-3537"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __imag__</span>
<a name="line-3538"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tout</span> <span class='hs-comment'>-- ^ __out__</span>
<a name="line-3539"></a><span class='hs-definition'>complex</span> <span class='hs-varid'>real</span> <span class='hs-varid'>imag</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3540"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Complex"</span>
<a name="line-3541"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-3542"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tout"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3543"></a>        <span class='hs-varid'>real</span> <span class='hs-varid'>imag</span>
<a name="line-3544"></a><span class='hs-comment'>{-
<a name="line-3545"></a>attr {
<a name="line-3546"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-3547"></a>  default_value { type: DT_FLOAT }
<a name="line-3548"></a>  name: "T"
<a name="line-3549"></a>  type: "type"
<a name="line-3550"></a>}
<a name="line-3551"></a>attr {
<a name="line-3552"></a>  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
<a name="line-3553"></a>  default_value { type: DT_COMPLEX64 }
<a name="line-3554"></a>  name: "Tout"
<a name="line-3555"></a>  type: "type"
<a name="line-3556"></a>}
<a name="line-3557"></a>input_arg { name: "real" type_attr: "T" }
<a name="line-3558"></a>input_arg { name: "imag" type_attr: "T" }
<a name="line-3559"></a>output_arg { name: "out" type_attr: "Tout" }
<a name="line-3560"></a>-}</span>
<a name="line-3561"></a>
<a name="line-3562"></a><a name="range"></a><span class='hs-comment'>-- | Creates a sequence of numbers.</span>
<a name="line-3563"></a><span class='hs-comment'>--</span>
<a name="line-3564"></a><span class='hs-comment'>-- This operation creates a sequence of numbers that begins at `start` and</span>
<a name="line-3565"></a><span class='hs-comment'>-- extends by increments of `delta` up to but not including `limit`.</span>
<a name="line-3566"></a><span class='hs-comment'>-- </span>
<a name="line-3567"></a><span class='hs-comment'>-- For example:</span>
<a name="line-3568"></a><span class='hs-comment'>-- </span>
<a name="line-3569"></a><span class='hs-comment'>-- ```</span>
<a name="line-3570"></a><span class='hs-comment'>-- # 'start' is 3</span>
<a name="line-3571"></a><span class='hs-comment'>-- # 'limit' is 18</span>
<a name="line-3572"></a><span class='hs-comment'>-- # 'delta' is 3</span>
<a name="line-3573"></a><span class='hs-comment'>-- tf.range(start, limit, delta) ==&gt; [3, 6, 9, 12, 15]</span>
<a name="line-3574"></a><span class='hs-comment'>-- ```</span>
<a name="line-3575"></a><span class='hs-definition'>range</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3576"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-3577"></a>                                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-3578"></a>                                                          <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3579"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __start__: 0-D (scalar). First entry in the sequence.</span>
<a name="line-3580"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __limit__: 0-D (scalar). Upper limit of sequence, exclusive.</span>
<a name="line-3581"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __delta__: 0-D (scalar). Optional. Default is 1. Number that increments `start`.</span>
<a name="line-3582"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __output__: 1-D.</span>
<a name="line-3583"></a><span class='hs-definition'>range</span> <span class='hs-varid'>start</span> <span class='hs-varid'>limit</span> <span class='hs-varid'>delta</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3584"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Range"</span>
<a name="line-3585"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3586"></a>        <span class='hs-varid'>start</span> <span class='hs-varid'>limit</span> <span class='hs-varid'>delta</span>
<a name="line-3587"></a><span class='hs-comment'>{-
<a name="line-3588"></a>attr {
<a name="line-3589"></a>  allowed_values {
<a name="line-3590"></a>    list {
<a name="line-3591"></a>      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
<a name="line-3592"></a>    }
<a name="line-3593"></a>  }
<a name="line-3594"></a>  default_value { type: DT_INT32 }
<a name="line-3595"></a>  name: "Tidx"
<a name="line-3596"></a>  type: "type"
<a name="line-3597"></a>}
<a name="line-3598"></a>input_arg {
<a name="line-3599"></a>  description: "0-D (scalar). First entry in the sequence."
<a name="line-3600"></a>  name: "start"
<a name="line-3601"></a>  type_attr: "Tidx"
<a name="line-3602"></a>}
<a name="line-3603"></a>input_arg {
<a name="line-3604"></a>  description: "0-D (scalar). Upper limit of sequence, exclusive."
<a name="line-3605"></a>  name: "limit"
<a name="line-3606"></a>  type_attr: "Tidx"
<a name="line-3607"></a>}
<a name="line-3608"></a>input_arg {
<a name="line-3609"></a>  description: "0-D (scalar). Optional. Default is 1. Number that increments `start`."
<a name="line-3610"></a>  name: "delta"
<a name="line-3611"></a>  type_attr: "Tidx"
<a name="line-3612"></a>}
<a name="line-3613"></a>output_arg { description: "1-D." name: "output" type_attr: "Tidx" }
<a name="line-3614"></a>-}</span>
<a name="line-3615"></a>
<a name="line-3616"></a><a name="any"></a><span class='hs-comment'>-- | Computes the "logical or" of elements across dimensions of a tensor.</span>
<a name="line-3617"></a><span class='hs-comment'>--</span>
<a name="line-3618"></a><span class='hs-comment'>-- Reduces `input` along the dimensions given in `reduction_indices`. Unless</span>
<a name="line-3619"></a><span class='hs-comment'>-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in</span>
<a name="line-3620"></a><span class='hs-comment'>-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are</span>
<a name="line-3621"></a><span class='hs-comment'>-- retained with length 1.</span>
<a name="line-3622"></a><span class='hs-definition'>any</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3623"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3624"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __input__: The tensor to reduce.</span>
<a name="line-3625"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __reduction_indices__: The dimensions to reduce.</span>
<a name="line-3626"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __output__: The reduced tensor.</span>
<a name="line-3627"></a><span class='hs-definition'>any</span> <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3628"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Any"</span>
<a name="line-3629"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3630"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span>
<a name="line-3631"></a><span class='hs-comment'>{-
<a name="line-3632"></a>attr {
<a name="line-3633"></a>  default_value { b: false }
<a name="line-3634"></a>  description: "If true, retain reduced dimensions with length 1."
<a name="line-3635"></a>  name: "keep_dims"
<a name="line-3636"></a>  type: "bool"
<a name="line-3637"></a>}
<a name="line-3638"></a>attr {
<a name="line-3639"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-3640"></a>  default_value { type: DT_INT32 }
<a name="line-3641"></a>  name: "Tidx"
<a name="line-3642"></a>  type: "type"
<a name="line-3643"></a>}
<a name="line-3644"></a>input_arg {
<a name="line-3645"></a>  description: "The tensor to reduce." name: "input" type: DT_BOOL
<a name="line-3646"></a>}
<a name="line-3647"></a>input_arg {
<a name="line-3648"></a>  description: "The dimensions to reduce."
<a name="line-3649"></a>  name: "reduction_indices"
<a name="line-3650"></a>  type_attr: "Tidx"
<a name="line-3651"></a>}
<a name="line-3652"></a>output_arg {
<a name="line-3653"></a>  description: "The reduced tensor." name: "output" type: DT_BOOL
<a name="line-3654"></a>}
<a name="line-3655"></a>-}</span>
<a name="line-3656"></a>
<a name="line-3657"></a><a name="sparseSegmentMean"></a><span class='hs-comment'>-- | Computes the mean along sparse segments of a tensor.</span>
<a name="line-3658"></a><span class='hs-comment'>--</span>
<a name="line-3659"></a><span class='hs-comment'>-- Read [the section on</span>
<a name="line-3660"></a><span class='hs-comment'>-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation</span>
<a name="line-3661"></a><span class='hs-comment'>-- of segments.</span>
<a name="line-3662"></a><span class='hs-comment'>-- </span>
<a name="line-3663"></a><span class='hs-comment'>-- Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first</span>
<a name="line-3664"></a><span class='hs-comment'>-- dimension, selecting a subset of dimension 0, specified by `indices`.</span>
<a name="line-3665"></a><span class='hs-definition'>sparseSegmentMean</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-3666"></a>                                                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3667"></a>                                               <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-3668"></a>                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3669"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3670"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-3671"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.</span>
<a name="line-3672"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.</span>
<a name="line-3673"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Has same shape as data, except for dimension 0 which</span>
<a name="line-3674"></a>                     <span class='hs-comment'>-- has size `k`, the number of segments.</span>
<a name="line-3675"></a><span class='hs-definition'>sparseSegmentMean</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>segment_ids</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3676"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseSegmentMean"</span>
<a name="line-3677"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-3678"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3679"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>segment_ids</span>
<a name="line-3680"></a><span class='hs-comment'>{-
<a name="line-3681"></a>attr {
<a name="line-3682"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-3683"></a>  name: "T"
<a name="line-3684"></a>  type: "type"
<a name="line-3685"></a>}
<a name="line-3686"></a>attr {
<a name="line-3687"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-3688"></a>  default_value { type: DT_INT32 }
<a name="line-3689"></a>  name: "Tidx"
<a name="line-3690"></a>  type: "type"
<a name="line-3691"></a>}
<a name="line-3692"></a>input_arg { name: "data" type_attr: "T" }
<a name="line-3693"></a>input_arg {
<a name="line-3694"></a>  description: "A 1-D tensor. Has same rank as `segment_ids`."
<a name="line-3695"></a>  name: "indices"
<a name="line-3696"></a>  type_attr: "Tidx"
<a name="line-3697"></a>}
<a name="line-3698"></a>input_arg {
<a name="line-3699"></a>  description: "A 1-D tensor. Values should be sorted and can be repeated."
<a name="line-3700"></a>  name: "segment_ids"
<a name="line-3701"></a>  type: DT_INT32
<a name="line-3702"></a>}
<a name="line-3703"></a>output_arg {
<a name="line-3704"></a>  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
<a name="line-3705"></a>  name: "output"
<a name="line-3706"></a>  type_attr: "T"
<a name="line-3707"></a>}
<a name="line-3708"></a>-}</span>
<a name="line-3709"></a>
<a name="line-3710"></a><a name="sparseSegmentSum"></a><span class='hs-comment'>-- | Computes the sum along sparse segments of a tensor.</span>
<a name="line-3711"></a><span class='hs-comment'>--</span>
<a name="line-3712"></a><span class='hs-comment'>-- Read [the section on</span>
<a name="line-3713"></a><span class='hs-comment'>-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation</span>
<a name="line-3714"></a><span class='hs-comment'>-- of segments.</span>
<a name="line-3715"></a><span class='hs-comment'>-- </span>
<a name="line-3716"></a><span class='hs-comment'>-- Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first</span>
<a name="line-3717"></a><span class='hs-comment'>-- dimension, selecting a subset of dimension 0, specified by `indices`.</span>
<a name="line-3718"></a><span class='hs-comment'>-- </span>
<a name="line-3719"></a><span class='hs-comment'>-- For example:</span>
<a name="line-3720"></a><span class='hs-comment'>-- </span>
<a name="line-3721"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-3722"></a><span class='hs-comment'>-- c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])</span>
<a name="line-3723"></a><span class='hs-comment'>-- </span>
<a name="line-3724"></a><span class='hs-comment'>-- # Select two rows, one segment.</span>
<a name="line-3725"></a><span class='hs-comment'>-- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))</span>
<a name="line-3726"></a><span class='hs-comment'>--   ==&gt; [[0 0 0 0]]</span>
<a name="line-3727"></a><span class='hs-comment'>-- </span>
<a name="line-3728"></a><span class='hs-comment'>-- # Select two rows, two segment.</span>
<a name="line-3729"></a><span class='hs-comment'>-- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))</span>
<a name="line-3730"></a><span class='hs-comment'>--   ==&gt; [[ 1  2  3  4]</span>
<a name="line-3731"></a><span class='hs-comment'>--        [-1 -2 -3 -4]]</span>
<a name="line-3732"></a><span class='hs-comment'>-- </span>
<a name="line-3733"></a><span class='hs-comment'>-- # Select all rows, two segments.</span>
<a name="line-3734"></a><span class='hs-comment'>-- tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))</span>
<a name="line-3735"></a><span class='hs-comment'>--   ==&gt; [[0 0 0 0]</span>
<a name="line-3736"></a><span class='hs-comment'>--        [5 6 7 8]]</span>
<a name="line-3737"></a><span class='hs-comment'>-- </span>
<a name="line-3738"></a><span class='hs-comment'>-- # Which is equivalent to:</span>
<a name="line-3739"></a><span class='hs-comment'>-- tf.segment_sum(c, tf.constant([0, 0, 1]))</span>
<a name="line-3740"></a><span class='hs-comment'>-- ```</span>
<a name="line-3741"></a><span class='hs-definition'>sparseSegmentSum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3742"></a>                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-3743"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3744"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-3745"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-3746"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-3747"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-3748"></a>                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-3749"></a>                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3750"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3751"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-3752"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.</span>
<a name="line-3753"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.</span>
<a name="line-3754"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Has same shape as data, except for dimension 0 which</span>
<a name="line-3755"></a>                    <span class='hs-comment'>-- has size `k`, the number of segments.</span>
<a name="line-3756"></a><span class='hs-definition'>sparseSegmentSum</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>segment_ids</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3757"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseSegmentSum"</span>
<a name="line-3758"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-3759"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3760"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>segment_ids</span>
<a name="line-3761"></a><span class='hs-comment'>{-
<a name="line-3762"></a>attr {
<a name="line-3763"></a>  allowed_values {
<a name="line-3764"></a>    list {
<a name="line-3765"></a>      type: DT_FLOAT
<a name="line-3766"></a>      type: DT_DOUBLE
<a name="line-3767"></a>      type: DT_INT32
<a name="line-3768"></a>      type: DT_INT64
<a name="line-3769"></a>      type: DT_UINT8
<a name="line-3770"></a>      type: DT_INT16
<a name="line-3771"></a>      type: DT_INT8
<a name="line-3772"></a>      type: DT_UINT16
<a name="line-3773"></a>      type: DT_HALF
<a name="line-3774"></a>    }
<a name="line-3775"></a>  }
<a name="line-3776"></a>  name: "T"
<a name="line-3777"></a>  type: "type"
<a name="line-3778"></a>}
<a name="line-3779"></a>attr {
<a name="line-3780"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-3781"></a>  default_value { type: DT_INT32 }
<a name="line-3782"></a>  name: "Tidx"
<a name="line-3783"></a>  type: "type"
<a name="line-3784"></a>}
<a name="line-3785"></a>input_arg { name: "data" type_attr: "T" }
<a name="line-3786"></a>input_arg {
<a name="line-3787"></a>  description: "A 1-D tensor. Has same rank as `segment_ids`."
<a name="line-3788"></a>  name: "indices"
<a name="line-3789"></a>  type_attr: "Tidx"
<a name="line-3790"></a>}
<a name="line-3791"></a>input_arg {
<a name="line-3792"></a>  description: "A 1-D tensor. Values should be sorted and can be repeated."
<a name="line-3793"></a>  name: "segment_ids"
<a name="line-3794"></a>  type: DT_INT32
<a name="line-3795"></a>}
<a name="line-3796"></a>output_arg {
<a name="line-3797"></a>  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
<a name="line-3798"></a>  name: "output"
<a name="line-3799"></a>  type_attr: "T"
<a name="line-3800"></a>}
<a name="line-3801"></a>-}</span>
<a name="line-3802"></a>
<a name="line-3803"></a><a name="unsortedSegmentSum"></a><span class='hs-comment'>-- | Computes the sum along segments of a tensor.</span>
<a name="line-3804"></a><span class='hs-comment'>--</span>
<a name="line-3805"></a><span class='hs-comment'>-- Read [the section on</span>
<a name="line-3806"></a><span class='hs-comment'>-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation</span>
<a name="line-3807"></a><span class='hs-comment'>-- of segments.</span>
<a name="line-3808"></a><span class='hs-comment'>-- </span>
<a name="line-3809"></a><span class='hs-comment'>-- Computes a tensor such that</span>
<a name="line-3810"></a><span class='hs-comment'>-- `(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such</span>
<a name="line-3811"></a><span class='hs-comment'>-- that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`</span>
<a name="line-3812"></a><span class='hs-comment'>-- need not be sorted and need not cover all values in the full</span>
<a name="line-3813"></a><span class='hs-comment'>-- range of valid values.</span>
<a name="line-3814"></a><span class='hs-comment'>-- </span>
<a name="line-3815"></a><span class='hs-comment'>-- If the sum is empty for a given segment ID `i`, `output[i] = 0`.</span>
<a name="line-3816"></a><span class='hs-comment'>-- </span>
<a name="line-3817"></a><span class='hs-comment'>-- `num_segments` should equal the number of distinct segment IDs.</span>
<a name="line-3818"></a><span class='hs-comment'>-- </span>
<a name="line-3819"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-3820"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/UnsortedSegmentSum.png" alt&gt;</span>
<a name="line-3821"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-3822"></a><span class='hs-definition'>unsortedSegmentSum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3823"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3824"></a>                                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3825"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-3826"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3827"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-3828"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-3829"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-3830"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-3831"></a>                                                            <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3832"></a>                                                    <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-3833"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3834"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3835"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-3836"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __segment_ids__: A tensor whose shape is a prefix of `data.shape`.</span>
<a name="line-3837"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __num_segments__</span>
<a name="line-3838"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Has same shape as data, except for the first `segment_ids.rank`</span>
<a name="line-3839"></a>                      <span class='hs-comment'>-- dimensions, which are replaced with a single dimension which has size</span>
<a name="line-3840"></a>                      <span class='hs-comment'>-- `num_segments`.</span>
<a name="line-3841"></a><span class='hs-definition'>unsortedSegmentSum</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span> <span class='hs-varid'>num_segments</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3842"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"UnsortedSegmentSum"</span>
<a name="line-3843"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-3844"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3845"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span> <span class='hs-varid'>num_segments</span>
<a name="line-3846"></a><span class='hs-comment'>{-
<a name="line-3847"></a>attr {
<a name="line-3848"></a>  allowed_values {
<a name="line-3849"></a>    list {
<a name="line-3850"></a>      type: DT_FLOAT
<a name="line-3851"></a>      type: DT_DOUBLE
<a name="line-3852"></a>      type: DT_INT64
<a name="line-3853"></a>      type: DT_INT32
<a name="line-3854"></a>      type: DT_UINT8
<a name="line-3855"></a>      type: DT_UINT16
<a name="line-3856"></a>      type: DT_INT16
<a name="line-3857"></a>      type: DT_INT8
<a name="line-3858"></a>      type: DT_COMPLEX64
<a name="line-3859"></a>      type: DT_COMPLEX128
<a name="line-3860"></a>      type: DT_QINT8
<a name="line-3861"></a>      type: DT_QUINT8
<a name="line-3862"></a>      type: DT_QINT32
<a name="line-3863"></a>      type: DT_HALF
<a name="line-3864"></a>    }
<a name="line-3865"></a>  }
<a name="line-3866"></a>  name: "T"
<a name="line-3867"></a>  type: "type"
<a name="line-3868"></a>}
<a name="line-3869"></a>attr {
<a name="line-3870"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-3871"></a>  name: "Tindices"
<a name="line-3872"></a>  type: "type"
<a name="line-3873"></a>}
<a name="line-3874"></a>input_arg { name: "data" type_attr: "T" }
<a name="line-3875"></a>input_arg {
<a name="line-3876"></a>  description: "A tensor whose shape is a prefix of `data.shape`."
<a name="line-3877"></a>  name: "segment_ids"
<a name="line-3878"></a>  type_attr: "Tindices"
<a name="line-3879"></a>}
<a name="line-3880"></a>input_arg { name: "num_segments" type: DT_INT32 }
<a name="line-3881"></a>output_arg {
<a name="line-3882"></a>  description: "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`."
<a name="line-3883"></a>  name: "output"
<a name="line-3884"></a>  type_attr: "T"
<a name="line-3885"></a>}
<a name="line-3886"></a>-}</span>
<a name="line-3887"></a>
<a name="line-3888"></a><a name="segmentMin"></a><span class='hs-comment'>-- | Computes the minimum along segments of a tensor.</span>
<a name="line-3889"></a><span class='hs-comment'>--</span>
<a name="line-3890"></a><span class='hs-comment'>-- Read [the section on</span>
<a name="line-3891"></a><span class='hs-comment'>-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation</span>
<a name="line-3892"></a><span class='hs-comment'>-- of segments.</span>
<a name="line-3893"></a><span class='hs-comment'>-- </span>
<a name="line-3894"></a><span class='hs-comment'>-- Computes a tensor such that</span>
<a name="line-3895"></a><span class='hs-comment'>-- \\(output_i = \min_j(data_j)\\) where `min` is over `j` such</span>
<a name="line-3896"></a><span class='hs-comment'>-- that `segment_ids[j] == i`.</span>
<a name="line-3897"></a><span class='hs-comment'>-- </span>
<a name="line-3898"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-3899"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/SegmentMin.png" alt&gt;</span>
<a name="line-3900"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-3901"></a><span class='hs-definition'>segmentMin</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-3902"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3903"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-3904"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-3905"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-3906"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-3907"></a>                                                               <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3908"></a>                                         <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-3909"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3910"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3911"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-3912"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s</span>
<a name="line-3913"></a>                                    <span class='hs-comment'>-- first dimension.  Values should be sorted and can be repeated.</span>
<a name="line-3914"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Has same shape as data, except for dimension 0 which</span>
<a name="line-3915"></a>              <span class='hs-comment'>-- has size `k`, the number of segments.</span>
<a name="line-3916"></a><span class='hs-definition'>segmentMin</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3917"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SegmentMin"</span>
<a name="line-3918"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-3919"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3920"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span>
<a name="line-3921"></a><span class='hs-comment'>{-
<a name="line-3922"></a>attr {
<a name="line-3923"></a>  allowed_values {
<a name="line-3924"></a>    list {
<a name="line-3925"></a>      type: DT_FLOAT
<a name="line-3926"></a>      type: DT_DOUBLE
<a name="line-3927"></a>      type: DT_INT32
<a name="line-3928"></a>      type: DT_INT64
<a name="line-3929"></a>      type: DT_UINT8
<a name="line-3930"></a>      type: DT_INT16
<a name="line-3931"></a>      type: DT_INT8
<a name="line-3932"></a>      type: DT_UINT16
<a name="line-3933"></a>      type: DT_HALF
<a name="line-3934"></a>    }
<a name="line-3935"></a>  }
<a name="line-3936"></a>  name: "T"
<a name="line-3937"></a>  type: "type"
<a name="line-3938"></a>}
<a name="line-3939"></a>attr {
<a name="line-3940"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-3941"></a>  name: "Tindices"
<a name="line-3942"></a>  type: "type"
<a name="line-3943"></a>}
<a name="line-3944"></a>input_arg { name: "data" type_attr: "T" }
<a name="line-3945"></a>input_arg {
<a name="line-3946"></a>  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
<a name="line-3947"></a>  name: "segment_ids"
<a name="line-3948"></a>  type_attr: "Tindices"
<a name="line-3949"></a>}
<a name="line-3950"></a>output_arg {
<a name="line-3951"></a>  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
<a name="line-3952"></a>  name: "output"
<a name="line-3953"></a>  type_attr: "T"
<a name="line-3954"></a>}
<a name="line-3955"></a>-}</span>
<a name="line-3956"></a>
<a name="line-3957"></a><a name="segmentProd"></a><span class='hs-comment'>-- | Computes the product along segments of a tensor.</span>
<a name="line-3958"></a><span class='hs-comment'>--</span>
<a name="line-3959"></a><span class='hs-comment'>-- Read [the section on</span>
<a name="line-3960"></a><span class='hs-comment'>-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation</span>
<a name="line-3961"></a><span class='hs-comment'>-- of segments.</span>
<a name="line-3962"></a><span class='hs-comment'>-- </span>
<a name="line-3963"></a><span class='hs-comment'>-- Computes a tensor such that</span>
<a name="line-3964"></a><span class='hs-comment'>-- \\(output_i = \prod_j data_j\\) where the product is over `j` such</span>
<a name="line-3965"></a><span class='hs-comment'>-- that `segment_ids[j] == i`.</span>
<a name="line-3966"></a><span class='hs-comment'>-- </span>
<a name="line-3967"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-3968"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/SegmentProd.png" alt&gt;</span>
<a name="line-3969"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-3970"></a><span class='hs-definition'>segmentProd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-3971"></a>                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3972"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-3973"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-3974"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3975"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-3976"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-3977"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-3978"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-3979"></a>                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-3980"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-3981"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-3982"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s</span>
<a name="line-3983"></a>                                     <span class='hs-comment'>-- first dimension.  Values should be sorted and can be repeated.</span>
<a name="line-3984"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Has same shape as data, except for dimension 0 which</span>
<a name="line-3985"></a>               <span class='hs-comment'>-- has size `k`, the number of segments.</span>
<a name="line-3986"></a><span class='hs-definition'>segmentProd</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-3987"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SegmentProd"</span>
<a name="line-3988"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-3989"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-3990"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span>
<a name="line-3991"></a><span class='hs-comment'>{-
<a name="line-3992"></a>attr {
<a name="line-3993"></a>  allowed_values {
<a name="line-3994"></a>    list {
<a name="line-3995"></a>      type: DT_FLOAT
<a name="line-3996"></a>      type: DT_DOUBLE
<a name="line-3997"></a>      type: DT_INT64
<a name="line-3998"></a>      type: DT_INT32
<a name="line-3999"></a>      type: DT_UINT8
<a name="line-4000"></a>      type: DT_UINT16
<a name="line-4001"></a>      type: DT_INT16
<a name="line-4002"></a>      type: DT_INT8
<a name="line-4003"></a>      type: DT_COMPLEX64
<a name="line-4004"></a>      type: DT_COMPLEX128
<a name="line-4005"></a>      type: DT_QINT8
<a name="line-4006"></a>      type: DT_QUINT8
<a name="line-4007"></a>      type: DT_QINT32
<a name="line-4008"></a>      type: DT_HALF
<a name="line-4009"></a>    }
<a name="line-4010"></a>  }
<a name="line-4011"></a>  name: "T"
<a name="line-4012"></a>  type: "type"
<a name="line-4013"></a>}
<a name="line-4014"></a>attr {
<a name="line-4015"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-4016"></a>  name: "Tindices"
<a name="line-4017"></a>  type: "type"
<a name="line-4018"></a>}
<a name="line-4019"></a>input_arg { name: "data" type_attr: "T" }
<a name="line-4020"></a>input_arg {
<a name="line-4021"></a>  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
<a name="line-4022"></a>  name: "segment_ids"
<a name="line-4023"></a>  type_attr: "Tindices"
<a name="line-4024"></a>}
<a name="line-4025"></a>output_arg {
<a name="line-4026"></a>  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
<a name="line-4027"></a>  name: "output"
<a name="line-4028"></a>  type_attr: "T"
<a name="line-4029"></a>}
<a name="line-4030"></a>-}</span>
<a name="line-4031"></a>
<a name="line-4032"></a><a name="segmentMean"></a><span class='hs-comment'>-- | Computes the mean along segments of a tensor.</span>
<a name="line-4033"></a><span class='hs-comment'>--</span>
<a name="line-4034"></a><span class='hs-comment'>-- Read [the section on</span>
<a name="line-4035"></a><span class='hs-comment'>-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation</span>
<a name="line-4036"></a><span class='hs-comment'>-- of segments.</span>
<a name="line-4037"></a><span class='hs-comment'>-- </span>
<a name="line-4038"></a><span class='hs-comment'>-- Computes a tensor such that</span>
<a name="line-4039"></a><span class='hs-comment'>-- \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is</span>
<a name="line-4040"></a><span class='hs-comment'>-- over `j` such that `segment_ids[j] == i` and `N` is the total number of</span>
<a name="line-4041"></a><span class='hs-comment'>-- values summed.</span>
<a name="line-4042"></a><span class='hs-comment'>-- </span>
<a name="line-4043"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-4044"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/SegmentMean.png" alt&gt;</span>
<a name="line-4045"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-4046"></a><span class='hs-definition'>segmentMean</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-4047"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4048"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-4049"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4050"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-4051"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-4052"></a>                                                                <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4053"></a>                                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-4054"></a>                                          <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-4055"></a>                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4056"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4057"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-4058"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s</span>
<a name="line-4059"></a>                                     <span class='hs-comment'>-- first dimension.  Values should be sorted and can be repeated.</span>
<a name="line-4060"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Has same shape as data, except for dimension 0 which</span>
<a name="line-4061"></a>               <span class='hs-comment'>-- has size `k`, the number of segments.</span>
<a name="line-4062"></a><span class='hs-definition'>segmentMean</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4063"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SegmentMean"</span>
<a name="line-4064"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-4065"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4066"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span>
<a name="line-4067"></a><span class='hs-comment'>{-
<a name="line-4068"></a>attr {
<a name="line-4069"></a>  allowed_values {
<a name="line-4070"></a>    list {
<a name="line-4071"></a>      type: DT_FLOAT
<a name="line-4072"></a>      type: DT_DOUBLE
<a name="line-4073"></a>      type: DT_INT32
<a name="line-4074"></a>      type: DT_INT64
<a name="line-4075"></a>      type: DT_UINT8
<a name="line-4076"></a>      type: DT_INT16
<a name="line-4077"></a>      type: DT_INT8
<a name="line-4078"></a>      type: DT_UINT16
<a name="line-4079"></a>      type: DT_HALF
<a name="line-4080"></a>    }
<a name="line-4081"></a>  }
<a name="line-4082"></a>  name: "T"
<a name="line-4083"></a>  type: "type"
<a name="line-4084"></a>}
<a name="line-4085"></a>attr {
<a name="line-4086"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-4087"></a>  name: "Tindices"
<a name="line-4088"></a>  type: "type"
<a name="line-4089"></a>}
<a name="line-4090"></a>input_arg { name: "data" type_attr: "T" }
<a name="line-4091"></a>input_arg {
<a name="line-4092"></a>  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
<a name="line-4093"></a>  name: "segment_ids"
<a name="line-4094"></a>  type_attr: "Tindices"
<a name="line-4095"></a>}
<a name="line-4096"></a>output_arg {
<a name="line-4097"></a>  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
<a name="line-4098"></a>  name: "output"
<a name="line-4099"></a>  type_attr: "T"
<a name="line-4100"></a>}
<a name="line-4101"></a>-}</span>
<a name="line-4102"></a>
<a name="line-4103"></a><a name="segmentSum"></a><span class='hs-comment'>-- | Computes the sum along segments of a tensor.</span>
<a name="line-4104"></a><span class='hs-comment'>--</span>
<a name="line-4105"></a><span class='hs-comment'>-- Read [the section on Segmentation](../../api_docs/python/math_ops.md#segmentation)</span>
<a name="line-4106"></a><span class='hs-comment'>-- for an explanation of segments.</span>
<a name="line-4107"></a><span class='hs-comment'>-- </span>
<a name="line-4108"></a><span class='hs-comment'>-- Computes a tensor such that</span>
<a name="line-4109"></a><span class='hs-comment'>-- \\(output_i = \sum_j data_j\\) where sum is over `j` such</span>
<a name="line-4110"></a><span class='hs-comment'>-- that `segment_ids[j] == i`.</span>
<a name="line-4111"></a><span class='hs-comment'>-- </span>
<a name="line-4112"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-4113"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/SegmentSum.png" alt&gt;</span>
<a name="line-4114"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-4115"></a><span class='hs-definition'>segmentSum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-4116"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4117"></a>                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4118"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4119"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4120"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-4121"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4122"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-4123"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4124"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4125"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-4126"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s</span>
<a name="line-4127"></a>                                    <span class='hs-comment'>-- first dimension.  Values should be sorted and can be repeated.</span>
<a name="line-4128"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Has same shape as data, except for dimension 0 which</span>
<a name="line-4129"></a>              <span class='hs-comment'>-- has size `k`, the number of segments.</span>
<a name="line-4130"></a><span class='hs-definition'>segmentSum</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4131"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SegmentSum"</span>
<a name="line-4132"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-4133"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4134"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span>
<a name="line-4135"></a><span class='hs-comment'>{-
<a name="line-4136"></a>attr {
<a name="line-4137"></a>  allowed_values {
<a name="line-4138"></a>    list {
<a name="line-4139"></a>      type: DT_FLOAT
<a name="line-4140"></a>      type: DT_DOUBLE
<a name="line-4141"></a>      type: DT_INT64
<a name="line-4142"></a>      type: DT_INT32
<a name="line-4143"></a>      type: DT_UINT8
<a name="line-4144"></a>      type: DT_UINT16
<a name="line-4145"></a>      type: DT_INT16
<a name="line-4146"></a>      type: DT_INT8
<a name="line-4147"></a>      type: DT_COMPLEX64
<a name="line-4148"></a>      type: DT_COMPLEX128
<a name="line-4149"></a>      type: DT_QINT8
<a name="line-4150"></a>      type: DT_QUINT8
<a name="line-4151"></a>      type: DT_QINT32
<a name="line-4152"></a>      type: DT_HALF
<a name="line-4153"></a>    }
<a name="line-4154"></a>  }
<a name="line-4155"></a>  name: "T"
<a name="line-4156"></a>  type: "type"
<a name="line-4157"></a>}
<a name="line-4158"></a>attr {
<a name="line-4159"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-4160"></a>  name: "Tindices"
<a name="line-4161"></a>  type: "type"
<a name="line-4162"></a>}
<a name="line-4163"></a>input_arg { name: "data" type_attr: "T" }
<a name="line-4164"></a>input_arg {
<a name="line-4165"></a>  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
<a name="line-4166"></a>  name: "segment_ids"
<a name="line-4167"></a>  type_attr: "Tindices"
<a name="line-4168"></a>}
<a name="line-4169"></a>output_arg {
<a name="line-4170"></a>  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
<a name="line-4171"></a>  name: "output"
<a name="line-4172"></a>  type_attr: "T"
<a name="line-4173"></a>}
<a name="line-4174"></a>-}</span>
<a name="line-4175"></a>
<a name="line-4176"></a><span class='hs-comment'>-- | Returns the index with the smallest value across dimensions of a tensor.</span>
<a name="line-4177"></a>
<a name="line-4178"></a><a name="argMin"></a><span class='hs-definition'>argMin</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-4179"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4180"></a>                                         <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4181"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4182"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4183"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-4184"></a>                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-4185"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4186"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4187"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-4188"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __dimension__: int32, 0 &lt;= dimension &lt; rank(input).  Describes which dimension</span>
<a name="line-4189"></a>                            <span class='hs-comment'>-- of the input Tensor to reduce across. For vectors, use dimension = 0.</span>
<a name="line-4190"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-4191"></a><span class='hs-definition'>argMin</span> <span class='hs-varid'>input</span> <span class='hs-varid'>dimension</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4192"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ArgMin"</span>
<a name="line-4193"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-4194"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4195"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>dimension</span>
<a name="line-4196"></a><span class='hs-comment'>{-
<a name="line-4197"></a>attr {
<a name="line-4198"></a>  allowed_values {
<a name="line-4199"></a>    list {
<a name="line-4200"></a>      type: DT_FLOAT
<a name="line-4201"></a>      type: DT_DOUBLE
<a name="line-4202"></a>      type: DT_INT64
<a name="line-4203"></a>      type: DT_INT32
<a name="line-4204"></a>      type: DT_UINT8
<a name="line-4205"></a>      type: DT_UINT16
<a name="line-4206"></a>      type: DT_INT16
<a name="line-4207"></a>      type: DT_INT8
<a name="line-4208"></a>      type: DT_COMPLEX64
<a name="line-4209"></a>      type: DT_COMPLEX128
<a name="line-4210"></a>      type: DT_QINT8
<a name="line-4211"></a>      type: DT_QUINT8
<a name="line-4212"></a>      type: DT_QINT32
<a name="line-4213"></a>      type: DT_HALF
<a name="line-4214"></a>    }
<a name="line-4215"></a>  }
<a name="line-4216"></a>  name: "T"
<a name="line-4217"></a>  type: "type"
<a name="line-4218"></a>}
<a name="line-4219"></a>attr {
<a name="line-4220"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-4221"></a>  default_value { type: DT_INT32 }
<a name="line-4222"></a>  name: "Tidx"
<a name="line-4223"></a>  type: "type"
<a name="line-4224"></a>}
<a name="line-4225"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-4226"></a>input_arg {
<a name="line-4227"></a>  description: "int32, 0 &lt;= dimension &lt; rank(input).  Describes which dimension\nof the input Tensor to reduce across. For vectors, use dimension = 0."
<a name="line-4228"></a>  name: "dimension"
<a name="line-4229"></a>  type_attr: "Tidx"
<a name="line-4230"></a>}
<a name="line-4231"></a>output_arg { name: "output" type: DT_INT64 }
<a name="line-4232"></a>-}</span>
<a name="line-4233"></a>
<a name="line-4234"></a><a name="max"></a><span class='hs-comment'>-- | Computes the maximum of elements across dimensions of a tensor.</span>
<a name="line-4235"></a><span class='hs-comment'>--</span>
<a name="line-4236"></a><span class='hs-comment'>-- Reduces `input` along the dimensions given in `reduction_indices`. Unless</span>
<a name="line-4237"></a><span class='hs-comment'>-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in</span>
<a name="line-4238"></a><span class='hs-comment'>-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are</span>
<a name="line-4239"></a><span class='hs-comment'>-- retained with length 1.</span>
<a name="line-4240"></a><span class='hs-definition'>max</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-4241"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4242"></a>                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4243"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4244"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4245"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4246"></a>                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-4247"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4248"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The tensor to reduce.</span>
<a name="line-4249"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __reduction_indices__: The dimensions to reduce.</span>
<a name="line-4250"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The reduced tensor.</span>
<a name="line-4251"></a><span class='hs-definition'>max</span> <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4252"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Max"</span>
<a name="line-4253"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-4254"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4255"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span>
<a name="line-4256"></a><span class='hs-comment'>{-
<a name="line-4257"></a>attr {
<a name="line-4258"></a>  default_value { b: false }
<a name="line-4259"></a>  description: "If true, retain reduced dimensions with length 1."
<a name="line-4260"></a>  name: "keep_dims"
<a name="line-4261"></a>  type: "bool"
<a name="line-4262"></a>}
<a name="line-4263"></a>attr {
<a name="line-4264"></a>  allowed_values {
<a name="line-4265"></a>    list {
<a name="line-4266"></a>      type: DT_FLOAT
<a name="line-4267"></a>      type: DT_DOUBLE
<a name="line-4268"></a>      type: DT_INT64
<a name="line-4269"></a>      type: DT_INT32
<a name="line-4270"></a>      type: DT_UINT8
<a name="line-4271"></a>      type: DT_UINT16
<a name="line-4272"></a>      type: DT_INT16
<a name="line-4273"></a>      type: DT_INT8
<a name="line-4274"></a>      type: DT_COMPLEX64
<a name="line-4275"></a>      type: DT_COMPLEX128
<a name="line-4276"></a>      type: DT_QINT8
<a name="line-4277"></a>      type: DT_QUINT8
<a name="line-4278"></a>      type: DT_QINT32
<a name="line-4279"></a>      type: DT_HALF
<a name="line-4280"></a>    }
<a name="line-4281"></a>  }
<a name="line-4282"></a>  name: "T"
<a name="line-4283"></a>  type: "type"
<a name="line-4284"></a>}
<a name="line-4285"></a>attr {
<a name="line-4286"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-4287"></a>  default_value { type: DT_INT32 }
<a name="line-4288"></a>  name: "Tidx"
<a name="line-4289"></a>  type: "type"
<a name="line-4290"></a>}
<a name="line-4291"></a>input_arg {
<a name="line-4292"></a>  description: "The tensor to reduce." name: "input" type_attr: "T"
<a name="line-4293"></a>}
<a name="line-4294"></a>input_arg {
<a name="line-4295"></a>  description: "The dimensions to reduce."
<a name="line-4296"></a>  name: "reduction_indices"
<a name="line-4297"></a>  type_attr: "Tidx"
<a name="line-4298"></a>}
<a name="line-4299"></a>output_arg {
<a name="line-4300"></a>  description: "The reduced tensor." name: "output" type_attr: "T"
<a name="line-4301"></a>}
<a name="line-4302"></a>-}</span>
<a name="line-4303"></a>
<a name="line-4304"></a><a name="min"></a><span class='hs-comment'>-- | Computes the minimum of elements across dimensions of a tensor.</span>
<a name="line-4305"></a><span class='hs-comment'>--</span>
<a name="line-4306"></a><span class='hs-comment'>-- Reduces `input` along the dimensions given in `reduction_indices`. Unless</span>
<a name="line-4307"></a><span class='hs-comment'>-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in</span>
<a name="line-4308"></a><span class='hs-comment'>-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are</span>
<a name="line-4309"></a><span class='hs-comment'>-- retained with length 1.</span>
<a name="line-4310"></a><span class='hs-definition'>min</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-4311"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4312"></a>                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4313"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4314"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4315"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4316"></a>                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-4317"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4318"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The tensor to reduce.</span>
<a name="line-4319"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __reduction_indices__: The dimensions to reduce.</span>
<a name="line-4320"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The reduced tensor.</span>
<a name="line-4321"></a><span class='hs-definition'>min</span> <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4322"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Min"</span>
<a name="line-4323"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-4324"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4325"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span>
<a name="line-4326"></a><span class='hs-comment'>{-
<a name="line-4327"></a>attr {
<a name="line-4328"></a>  default_value { b: false }
<a name="line-4329"></a>  description: "If true, retain reduced dimensions with length 1."
<a name="line-4330"></a>  name: "keep_dims"
<a name="line-4331"></a>  type: "bool"
<a name="line-4332"></a>}
<a name="line-4333"></a>attr {
<a name="line-4334"></a>  allowed_values {
<a name="line-4335"></a>    list {
<a name="line-4336"></a>      type: DT_FLOAT
<a name="line-4337"></a>      type: DT_DOUBLE
<a name="line-4338"></a>      type: DT_INT64
<a name="line-4339"></a>      type: DT_INT32
<a name="line-4340"></a>      type: DT_UINT8
<a name="line-4341"></a>      type: DT_UINT16
<a name="line-4342"></a>      type: DT_INT16
<a name="line-4343"></a>      type: DT_INT8
<a name="line-4344"></a>      type: DT_COMPLEX64
<a name="line-4345"></a>      type: DT_COMPLEX128
<a name="line-4346"></a>      type: DT_QINT8
<a name="line-4347"></a>      type: DT_QUINT8
<a name="line-4348"></a>      type: DT_QINT32
<a name="line-4349"></a>      type: DT_HALF
<a name="line-4350"></a>    }
<a name="line-4351"></a>  }
<a name="line-4352"></a>  name: "T"
<a name="line-4353"></a>  type: "type"
<a name="line-4354"></a>}
<a name="line-4355"></a>attr {
<a name="line-4356"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-4357"></a>  default_value { type: DT_INT32 }
<a name="line-4358"></a>  name: "Tidx"
<a name="line-4359"></a>  type: "type"
<a name="line-4360"></a>}
<a name="line-4361"></a>input_arg {
<a name="line-4362"></a>  description: "The tensor to reduce." name: "input" type_attr: "T"
<a name="line-4363"></a>}
<a name="line-4364"></a>input_arg {
<a name="line-4365"></a>  description: "The dimensions to reduce."
<a name="line-4366"></a>  name: "reduction_indices"
<a name="line-4367"></a>  type_attr: "Tidx"
<a name="line-4368"></a>}
<a name="line-4369"></a>output_arg {
<a name="line-4370"></a>  description: "The reduced tensor." name: "output" type_attr: "T"
<a name="line-4371"></a>}
<a name="line-4372"></a>-}</span>
<a name="line-4373"></a>
<a name="line-4374"></a><a name="prod"></a><span class='hs-comment'>-- | Computes the product of elements across dimensions of a tensor.</span>
<a name="line-4375"></a><span class='hs-comment'>--</span>
<a name="line-4376"></a><span class='hs-comment'>-- Reduces `input` along the dimensions given in `reduction_indices`. Unless</span>
<a name="line-4377"></a><span class='hs-comment'>-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in</span>
<a name="line-4378"></a><span class='hs-comment'>-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are</span>
<a name="line-4379"></a><span class='hs-comment'>-- retained with length 1.</span>
<a name="line-4380"></a><span class='hs-definition'>prod</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-4381"></a>                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4382"></a>                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4383"></a>                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4384"></a>                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4385"></a>                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-4386"></a>                                       <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-4387"></a>                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4388"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The tensor to reduce.</span>
<a name="line-4389"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __reduction_indices__: The dimensions to reduce.</span>
<a name="line-4390"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The reduced tensor.</span>
<a name="line-4391"></a><span class='hs-definition'>prod</span> <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4392"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Prod"</span>
<a name="line-4393"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-4394"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4395"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span>
<a name="line-4396"></a><span class='hs-comment'>{-
<a name="line-4397"></a>attr {
<a name="line-4398"></a>  default_value { b: false }
<a name="line-4399"></a>  description: "If true, retain reduced dimensions with length 1."
<a name="line-4400"></a>  name: "keep_dims"
<a name="line-4401"></a>  type: "bool"
<a name="line-4402"></a>}
<a name="line-4403"></a>attr {
<a name="line-4404"></a>  allowed_values {
<a name="line-4405"></a>    list {
<a name="line-4406"></a>      type: DT_FLOAT
<a name="line-4407"></a>      type: DT_DOUBLE
<a name="line-4408"></a>      type: DT_INT64
<a name="line-4409"></a>      type: DT_INT32
<a name="line-4410"></a>      type: DT_UINT8
<a name="line-4411"></a>      type: DT_UINT16
<a name="line-4412"></a>      type: DT_INT16
<a name="line-4413"></a>      type: DT_INT8
<a name="line-4414"></a>      type: DT_COMPLEX64
<a name="line-4415"></a>      type: DT_COMPLEX128
<a name="line-4416"></a>      type: DT_QINT8
<a name="line-4417"></a>      type: DT_QUINT8
<a name="line-4418"></a>      type: DT_QINT32
<a name="line-4419"></a>      type: DT_HALF
<a name="line-4420"></a>    }
<a name="line-4421"></a>  }
<a name="line-4422"></a>  name: "T"
<a name="line-4423"></a>  type: "type"
<a name="line-4424"></a>}
<a name="line-4425"></a>attr {
<a name="line-4426"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-4427"></a>  default_value { type: DT_INT32 }
<a name="line-4428"></a>  name: "Tidx"
<a name="line-4429"></a>  type: "type"
<a name="line-4430"></a>}
<a name="line-4431"></a>input_arg {
<a name="line-4432"></a>  description: "The tensor to reduce." name: "input" type_attr: "T"
<a name="line-4433"></a>}
<a name="line-4434"></a>input_arg {
<a name="line-4435"></a>  description: "The dimensions to reduce."
<a name="line-4436"></a>  name: "reduction_indices"
<a name="line-4437"></a>  type_attr: "Tidx"
<a name="line-4438"></a>}
<a name="line-4439"></a>output_arg {
<a name="line-4440"></a>  description: "The reduced tensor." name: "output" type_attr: "T"
<a name="line-4441"></a>}
<a name="line-4442"></a>-}</span>
<a name="line-4443"></a>
<a name="line-4444"></a><a name="sum"></a><span class='hs-comment'>-- | Computes the sum of elements across dimensions of a tensor.</span>
<a name="line-4445"></a><span class='hs-comment'>--</span>
<a name="line-4446"></a><span class='hs-comment'>-- Reduces `input` along the dimensions given in `reduction_indices`. Unless</span>
<a name="line-4447"></a><span class='hs-comment'>-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in</span>
<a name="line-4448"></a><span class='hs-comment'>-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are</span>
<a name="line-4449"></a><span class='hs-comment'>-- retained with length 1.</span>
<a name="line-4450"></a><span class='hs-definition'>sum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-4451"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4452"></a>                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4453"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4454"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4455"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4456"></a>                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-4457"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4458"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The tensor to reduce.</span>
<a name="line-4459"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __reduction_indices__: The dimensions to reduce.</span>
<a name="line-4460"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The reduced tensor.</span>
<a name="line-4461"></a><span class='hs-definition'>sum</span> <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4462"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Sum"</span>
<a name="line-4463"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-4464"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4465"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span>
<a name="line-4466"></a><span class='hs-comment'>{-
<a name="line-4467"></a>attr {
<a name="line-4468"></a>  default_value { b: false }
<a name="line-4469"></a>  description: "If true, retain reduced dimensions with length 1."
<a name="line-4470"></a>  name: "keep_dims"
<a name="line-4471"></a>  type: "bool"
<a name="line-4472"></a>}
<a name="line-4473"></a>attr {
<a name="line-4474"></a>  allowed_values {
<a name="line-4475"></a>    list {
<a name="line-4476"></a>      type: DT_FLOAT
<a name="line-4477"></a>      type: DT_DOUBLE
<a name="line-4478"></a>      type: DT_INT64
<a name="line-4479"></a>      type: DT_INT32
<a name="line-4480"></a>      type: DT_UINT8
<a name="line-4481"></a>      type: DT_UINT16
<a name="line-4482"></a>      type: DT_INT16
<a name="line-4483"></a>      type: DT_INT8
<a name="line-4484"></a>      type: DT_COMPLEX64
<a name="line-4485"></a>      type: DT_COMPLEX128
<a name="line-4486"></a>      type: DT_QINT8
<a name="line-4487"></a>      type: DT_QUINT8
<a name="line-4488"></a>      type: DT_QINT32
<a name="line-4489"></a>      type: DT_HALF
<a name="line-4490"></a>    }
<a name="line-4491"></a>  }
<a name="line-4492"></a>  name: "T"
<a name="line-4493"></a>  type: "type"
<a name="line-4494"></a>}
<a name="line-4495"></a>attr {
<a name="line-4496"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-4497"></a>  default_value { type: DT_INT32 }
<a name="line-4498"></a>  name: "Tidx"
<a name="line-4499"></a>  type: "type"
<a name="line-4500"></a>}
<a name="line-4501"></a>input_arg {
<a name="line-4502"></a>  description: "The tensor to reduce." name: "input" type_attr: "T"
<a name="line-4503"></a>}
<a name="line-4504"></a>input_arg {
<a name="line-4505"></a>  description: "The dimensions to reduce."
<a name="line-4506"></a>  name: "reduction_indices"
<a name="line-4507"></a>  type_attr: "Tidx"
<a name="line-4508"></a>}
<a name="line-4509"></a>output_arg {
<a name="line-4510"></a>  description: "The reduced tensor." name: "output" type_attr: "T"
<a name="line-4511"></a>}
<a name="line-4512"></a>-}</span>
<a name="line-4513"></a>
<a name="line-4514"></a><a name="sparseMatMul"></a><span class='hs-comment'>-- | Multiply matrix "a" by matrix "b".</span>
<a name="line-4515"></a><span class='hs-comment'>--</span>
<a name="line-4516"></a><span class='hs-comment'>-- The inputs must be two-dimensional matrices and the inner dimension of "a" must</span>
<a name="line-4517"></a><span class='hs-comment'>-- match the outer dimension of "b". This op is optimized for the case where at</span>
<a name="line-4518"></a><span class='hs-comment'>-- least one of "a" or "b" is sparse. The breakeven for using this versus a dense</span>
<a name="line-4519"></a><span class='hs-comment'>-- matrix multiply on one platform was 30% zero values in the sparse matrix.</span>
<a name="line-4520"></a><span class='hs-definition'>sparseMatMul</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>ta</span> <span class='hs-varid'>tb</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>ta</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-4521"></a>                                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>ta</span><span class='hs-layout'>,</span>
<a name="line-4522"></a>                                      <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tb</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-4523"></a>                                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tb</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4524"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>ta</span> <span class='hs-comment'>-- ^ __a__</span>
<a name="line-4525"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tb</span> <span class='hs-comment'>-- ^ __b__</span>
<a name="line-4526"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __product__</span>
<a name="line-4527"></a><span class='hs-definition'>sparseMatMul</span> <span class='hs-varid'>a</span> <span class='hs-varid'>b</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4528"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseMatMul"</span>
<a name="line-4529"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Ta"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>ta</span><span class='hs-layout'>)</span>
<a name="line-4530"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tb"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tb</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4531"></a>        <span class='hs-varid'>a</span> <span class='hs-varid'>b</span>
<a name="line-4532"></a><span class='hs-comment'>{-
<a name="line-4533"></a>attr {
<a name="line-4534"></a>  default_value { b: false } name: "transpose_a" type: "bool"
<a name="line-4535"></a>}
<a name="line-4536"></a>attr {
<a name="line-4537"></a>  default_value { b: false } name: "transpose_b" type: "bool"
<a name="line-4538"></a>}
<a name="line-4539"></a>attr {
<a name="line-4540"></a>  default_value { b: false } name: "a_is_sparse" type: "bool"
<a name="line-4541"></a>}
<a name="line-4542"></a>attr {
<a name="line-4543"></a>  default_value { b: false } name: "b_is_sparse" type: "bool"
<a name="line-4544"></a>}
<a name="line-4545"></a>attr {
<a name="line-4546"></a>  allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } }
<a name="line-4547"></a>  default_value { type: DT_FLOAT }
<a name="line-4548"></a>  name: "Ta"
<a name="line-4549"></a>  type: "type"
<a name="line-4550"></a>}
<a name="line-4551"></a>attr {
<a name="line-4552"></a>  allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } }
<a name="line-4553"></a>  default_value { type: DT_FLOAT }
<a name="line-4554"></a>  name: "Tb"
<a name="line-4555"></a>  type: "type"
<a name="line-4556"></a>}
<a name="line-4557"></a>input_arg { name: "a" type_attr: "Ta" }
<a name="line-4558"></a>input_arg { name: "b" type_attr: "Tb" }
<a name="line-4559"></a>output_arg { name: "product" type: DT_FLOAT }
<a name="line-4560"></a>-}</span>
<a name="line-4561"></a>
<a name="line-4562"></a><a name="matMul"></a><span class='hs-comment'>-- | Multiply the matrix "a" by the matrix "b".</span>
<a name="line-4563"></a><span class='hs-comment'>--</span>
<a name="line-4564"></a><span class='hs-comment'>-- The inputs must be two-dimensional matrices and the inner dimension of</span>
<a name="line-4565"></a><span class='hs-comment'>-- "a" (after being transposed if transpose_a is true) must match the</span>
<a name="line-4566"></a><span class='hs-comment'>-- outer dimension of "b" (after being transposed if transposed_b is</span>
<a name="line-4567"></a><span class='hs-comment'>-- true).</span>
<a name="line-4568"></a><span class='hs-comment'>-- </span>
<a name="line-4569"></a><span class='hs-comment'>-- *Note*: The default kernel implementation for MatMul on GPUs uses</span>
<a name="line-4570"></a><span class='hs-comment'>-- cublas.</span>
<a name="line-4571"></a><span class='hs-definition'>matMul</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4572"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4573"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4574"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4575"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4576"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a__</span>
<a name="line-4577"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __b__</span>
<a name="line-4578"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __product__</span>
<a name="line-4579"></a><span class='hs-definition'>matMul</span> <span class='hs-varid'>a</span> <span class='hs-varid'>b</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4580"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatMul"</span>
<a name="line-4581"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4582"></a>        <span class='hs-varid'>a</span> <span class='hs-varid'>b</span>
<a name="line-4583"></a><span class='hs-comment'>{-
<a name="line-4584"></a>attr {
<a name="line-4585"></a>  default_value { b: false }
<a name="line-4586"></a>  description: "If true, \"a\" is transposed before multiplication."
<a name="line-4587"></a>  name: "transpose_a"
<a name="line-4588"></a>  type: "bool"
<a name="line-4589"></a>}
<a name="line-4590"></a>attr {
<a name="line-4591"></a>  default_value { b: false }
<a name="line-4592"></a>  description: "If true, \"b\" is transposed before multiplication."
<a name="line-4593"></a>  name: "transpose_b"
<a name="line-4594"></a>  type: "bool"
<a name="line-4595"></a>}
<a name="line-4596"></a>attr {
<a name="line-4597"></a>  allowed_values {
<a name="line-4598"></a>    list {
<a name="line-4599"></a>      type: DT_HALF
<a name="line-4600"></a>      type: DT_FLOAT
<a name="line-4601"></a>      type: DT_DOUBLE
<a name="line-4602"></a>      type: DT_INT32
<a name="line-4603"></a>      type: DT_COMPLEX64
<a name="line-4604"></a>      type: DT_COMPLEX128
<a name="line-4605"></a>    }
<a name="line-4606"></a>  }
<a name="line-4607"></a>  name: "T"
<a name="line-4608"></a>  type: "type"
<a name="line-4609"></a>}
<a name="line-4610"></a>input_arg { name: "a" type_attr: "T" }
<a name="line-4611"></a>input_arg { name: "b" type_attr: "T" }
<a name="line-4612"></a>output_arg { name: "product" type_attr: "T" }
<a name="line-4613"></a>-}</span>
<a name="line-4614"></a>
<a name="line-4615"></a><a name="logicalAnd"></a><span class='hs-comment'>-- | Returns the truth value of x AND y element-wise.</span>
<a name="line-4616"></a><span class='hs-comment'>--</span>
<a name="line-4617"></a><span class='hs-comment'>-- *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting</span>
<a name="line-4618"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-4619"></a><span class='hs-definition'>logicalAnd</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4620"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-4621"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4622"></a><span class='hs-definition'>logicalAnd</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4623"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LogicalAnd"</span><span class='hs-layout'>)</span>
<a name="line-4624"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-4625"></a><span class='hs-comment'>{-
<a name="line-4626"></a>input_arg { name: "x" type: DT_BOOL }
<a name="line-4627"></a>input_arg { name: "y" type: DT_BOOL }
<a name="line-4628"></a>output_arg { name: "z" type: DT_BOOL }
<a name="line-4629"></a>-}</span>
<a name="line-4630"></a>
<a name="line-4631"></a><a name="equal"></a><span class='hs-comment'>-- | Returns the truth value of (x == y) element-wise.</span>
<a name="line-4632"></a><span class='hs-comment'>--</span>
<a name="line-4633"></a><span class='hs-comment'>-- *NOTE*: `Equal` supports broadcasting. More about broadcasting</span>
<a name="line-4634"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-4635"></a><span class='hs-definition'>equal</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4636"></a>                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4637"></a>                                                 <span class='hs-conid'>Bool</span><span class='hs-layout'>,</span>
<a name="line-4638"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>,</span>
<a name="line-4639"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4640"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4641"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-4642"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4643"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4644"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4645"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-4646"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4647"></a><span class='hs-definition'>equal</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4648"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Equal"</span>
<a name="line-4649"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4650"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-4651"></a><span class='hs-comment'>{-
<a name="line-4652"></a>attr {
<a name="line-4653"></a>  allowed_values {
<a name="line-4654"></a>    list {
<a name="line-4655"></a>      type: DT_HALF
<a name="line-4656"></a>      type: DT_FLOAT
<a name="line-4657"></a>      type: DT_DOUBLE
<a name="line-4658"></a>      type: DT_UINT8
<a name="line-4659"></a>      type: DT_INT8
<a name="line-4660"></a>      type: DT_INT16
<a name="line-4661"></a>      type: DT_INT32
<a name="line-4662"></a>      type: DT_INT64
<a name="line-4663"></a>      type: DT_COMPLEX64
<a name="line-4664"></a>      type: DT_QUINT8
<a name="line-4665"></a>      type: DT_QINT8
<a name="line-4666"></a>      type: DT_QINT32
<a name="line-4667"></a>      type: DT_STRING
<a name="line-4668"></a>      type: DT_BOOL
<a name="line-4669"></a>      type: DT_COMPLEX128
<a name="line-4670"></a>    }
<a name="line-4671"></a>  }
<a name="line-4672"></a>  name: "T"
<a name="line-4673"></a>  type: "type"
<a name="line-4674"></a>}
<a name="line-4675"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-4676"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-4677"></a>output_arg { name: "z" type: DT_BOOL }
<a name="line-4678"></a>-}</span>
<a name="line-4679"></a>
<a name="line-4680"></a><a name="greaterEqual"></a><span class='hs-comment'>-- | Returns the truth value of (x &gt;= y) element-wise.</span>
<a name="line-4681"></a><span class='hs-comment'>--</span>
<a name="line-4682"></a><span class='hs-comment'>-- *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting</span>
<a name="line-4683"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-4684"></a><span class='hs-definition'>greaterEqual</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-4685"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4686"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-4687"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4688"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-4689"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4690"></a>                                                        <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4691"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4692"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-4693"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4694"></a><span class='hs-definition'>greaterEqual</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4695"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"GreaterEqual"</span>
<a name="line-4696"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4697"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-4698"></a><span class='hs-comment'>{-
<a name="line-4699"></a>attr {
<a name="line-4700"></a>  allowed_values {
<a name="line-4701"></a>    list {
<a name="line-4702"></a>      type: DT_FLOAT
<a name="line-4703"></a>      type: DT_DOUBLE
<a name="line-4704"></a>      type: DT_INT32
<a name="line-4705"></a>      type: DT_INT64
<a name="line-4706"></a>      type: DT_UINT8
<a name="line-4707"></a>      type: DT_INT16
<a name="line-4708"></a>      type: DT_INT8
<a name="line-4709"></a>      type: DT_UINT16
<a name="line-4710"></a>      type: DT_HALF
<a name="line-4711"></a>    }
<a name="line-4712"></a>  }
<a name="line-4713"></a>  name: "T"
<a name="line-4714"></a>  type: "type"
<a name="line-4715"></a>}
<a name="line-4716"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-4717"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-4718"></a>output_arg { name: "z" type: DT_BOOL }
<a name="line-4719"></a>-}</span>
<a name="line-4720"></a>
<a name="line-4721"></a><a name="lessEqual"></a><span class='hs-comment'>-- | Returns the truth value of (x &lt;= y) element-wise.</span>
<a name="line-4722"></a><span class='hs-comment'>--</span>
<a name="line-4723"></a><span class='hs-comment'>-- *NOTE*: `LessEqual` supports broadcasting. More about broadcasting</span>
<a name="line-4724"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-4725"></a><span class='hs-definition'>lessEqual</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-4726"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4727"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-4728"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4729"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-4730"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4731"></a>                                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4732"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4733"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-4734"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4735"></a><span class='hs-definition'>lessEqual</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4736"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LessEqual"</span>
<a name="line-4737"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4738"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-4739"></a><span class='hs-comment'>{-
<a name="line-4740"></a>attr {
<a name="line-4741"></a>  allowed_values {
<a name="line-4742"></a>    list {
<a name="line-4743"></a>      type: DT_FLOAT
<a name="line-4744"></a>      type: DT_DOUBLE
<a name="line-4745"></a>      type: DT_INT32
<a name="line-4746"></a>      type: DT_INT64
<a name="line-4747"></a>      type: DT_UINT8
<a name="line-4748"></a>      type: DT_INT16
<a name="line-4749"></a>      type: DT_INT8
<a name="line-4750"></a>      type: DT_UINT16
<a name="line-4751"></a>      type: DT_HALF
<a name="line-4752"></a>    }
<a name="line-4753"></a>  }
<a name="line-4754"></a>  name: "T"
<a name="line-4755"></a>  type: "type"
<a name="line-4756"></a>}
<a name="line-4757"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-4758"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-4759"></a>output_arg { name: "z" type: DT_BOOL }
<a name="line-4760"></a>-}</span>
<a name="line-4761"></a>
<a name="line-4762"></a><a name="less"></a><span class='hs-comment'>-- | Returns the truth value of (x &lt; y) element-wise.</span>
<a name="line-4763"></a><span class='hs-comment'>--</span>
<a name="line-4764"></a><span class='hs-comment'>-- *NOTE*: `Less` supports broadcasting. More about broadcasting</span>
<a name="line-4765"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-4766"></a><span class='hs-definition'>less</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4767"></a>                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-4768"></a>                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-4769"></a>                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4770"></a>                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4771"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4772"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-4773"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4774"></a><span class='hs-definition'>less</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4775"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Less"</span>
<a name="line-4776"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4777"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-4778"></a><span class='hs-comment'>{-
<a name="line-4779"></a>attr {
<a name="line-4780"></a>  allowed_values {
<a name="line-4781"></a>    list {
<a name="line-4782"></a>      type: DT_FLOAT
<a name="line-4783"></a>      type: DT_DOUBLE
<a name="line-4784"></a>      type: DT_INT32
<a name="line-4785"></a>      type: DT_INT64
<a name="line-4786"></a>      type: DT_UINT8
<a name="line-4787"></a>      type: DT_INT16
<a name="line-4788"></a>      type: DT_INT8
<a name="line-4789"></a>      type: DT_UINT16
<a name="line-4790"></a>      type: DT_HALF
<a name="line-4791"></a>    }
<a name="line-4792"></a>  }
<a name="line-4793"></a>  name: "T"
<a name="line-4794"></a>  type: "type"
<a name="line-4795"></a>}
<a name="line-4796"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-4797"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-4798"></a>output_arg { name: "z" type: DT_BOOL }
<a name="line-4799"></a>-}</span>
<a name="line-4800"></a>
<a name="line-4801"></a><a name="polygamma"></a><span class='hs-comment'>-- | Compute the polygamma function \\(\psi^{(n)}(x)\\).</span>
<a name="line-4802"></a><span class='hs-comment'>--</span>
<a name="line-4803"></a><span class='hs-comment'>-- The polygamma function is defined as:</span>
<a name="line-4804"></a><span class='hs-comment'>-- </span>
<a name="line-4805"></a><span class='hs-comment'>-- ```</span>
<a name="line-4806"></a><span class='hs-comment'>-- \psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)</span>
<a name="line-4807"></a><span class='hs-comment'>-- ```</span>
<a name="line-4808"></a><span class='hs-comment'>-- where \\(\psi(x)\\) is the digamma function.</span>
<a name="line-4809"></a><span class='hs-definition'>polygamma</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4810"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a__</span>
<a name="line-4811"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4812"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4813"></a><span class='hs-definition'>polygamma</span> <span class='hs-varid'>a</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4814"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Polygamma"</span>
<a name="line-4815"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4816"></a>        <span class='hs-varid'>a</span> <span class='hs-varid'>x</span>
<a name="line-4817"></a><span class='hs-comment'>{-
<a name="line-4818"></a>attr {
<a name="line-4819"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-4820"></a>  name: "T"
<a name="line-4821"></a>  type: "type"
<a name="line-4822"></a>}
<a name="line-4823"></a>input_arg { name: "a" type_attr: "T" }
<a name="line-4824"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-4825"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-4826"></a>-}</span>
<a name="line-4827"></a>
<a name="line-4828"></a><a name="igamma"></a><span class='hs-comment'>-- | Compute the lower regularized incomplete Gamma function `Q(a, x)`.</span>
<a name="line-4829"></a><span class='hs-comment'>--</span>
<a name="line-4830"></a><span class='hs-comment'>-- The lower regularized incomplete Gamma function is defined as:</span>
<a name="line-4831"></a><span class='hs-comment'>-- </span>
<a name="line-4832"></a><span class='hs-comment'>-- ```</span>
<a name="line-4833"></a><span class='hs-comment'>-- P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)</span>
<a name="line-4834"></a><span class='hs-comment'>-- ```</span>
<a name="line-4835"></a><span class='hs-comment'>-- where</span>
<a name="line-4836"></a><span class='hs-comment'>-- ```</span>
<a name="line-4837"></a><span class='hs-comment'>-- gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt</span>
<a name="line-4838"></a><span class='hs-comment'>-- ```</span>
<a name="line-4839"></a><span class='hs-comment'>-- is the lower incomplete Gamma function.</span>
<a name="line-4840"></a><span class='hs-comment'>-- </span>
<a name="line-4841"></a><span class='hs-comment'>-- Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete</span>
<a name="line-4842"></a><span class='hs-comment'>-- Gamma function.</span>
<a name="line-4843"></a><span class='hs-definition'>igamma</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4844"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a__</span>
<a name="line-4845"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4846"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4847"></a><span class='hs-definition'>igamma</span> <span class='hs-varid'>a</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4848"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Igamma"</span>
<a name="line-4849"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4850"></a>        <span class='hs-varid'>a</span> <span class='hs-varid'>x</span>
<a name="line-4851"></a><span class='hs-comment'>{-
<a name="line-4852"></a>attr {
<a name="line-4853"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-4854"></a>  name: "T"
<a name="line-4855"></a>  type: "type"
<a name="line-4856"></a>}
<a name="line-4857"></a>input_arg { name: "a" type_attr: "T" }
<a name="line-4858"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-4859"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-4860"></a>-}</span>
<a name="line-4861"></a>
<a name="line-4862"></a><a name="igammac"></a><span class='hs-comment'>-- | Compute the upper regularized incomplete Gamma function `Q(a, x)`.</span>
<a name="line-4863"></a><span class='hs-comment'>--</span>
<a name="line-4864"></a><span class='hs-comment'>-- The upper regularized incomplete Gamma function is defined as:</span>
<a name="line-4865"></a><span class='hs-comment'>-- </span>
<a name="line-4866"></a><span class='hs-comment'>-- ```</span>
<a name="line-4867"></a><span class='hs-comment'>-- Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)</span>
<a name="line-4868"></a><span class='hs-comment'>-- ```</span>
<a name="line-4869"></a><span class='hs-comment'>-- where</span>
<a name="line-4870"></a><span class='hs-comment'>-- ```</span>
<a name="line-4871"></a><span class='hs-comment'>-- Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt</span>
<a name="line-4872"></a><span class='hs-comment'>-- ```</span>
<a name="line-4873"></a><span class='hs-comment'>-- is the upper incomplete Gama function.</span>
<a name="line-4874"></a><span class='hs-comment'>-- </span>
<a name="line-4875"></a><span class='hs-comment'>-- Note, above `P(a, x)` (`Igamma`) is the lower regularized complete</span>
<a name="line-4876"></a><span class='hs-comment'>-- Gamma function.</span>
<a name="line-4877"></a><span class='hs-definition'>igammac</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4878"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a__</span>
<a name="line-4879"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4880"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4881"></a><span class='hs-definition'>igammac</span> <span class='hs-varid'>a</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4882"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Igammac"</span>
<a name="line-4883"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4884"></a>        <span class='hs-varid'>a</span> <span class='hs-varid'>x</span>
<a name="line-4885"></a><span class='hs-comment'>{-
<a name="line-4886"></a>attr {
<a name="line-4887"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-4888"></a>  name: "T"
<a name="line-4889"></a>  type: "type"
<a name="line-4890"></a>}
<a name="line-4891"></a>input_arg { name: "a" type_attr: "T" }
<a name="line-4892"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-4893"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-4894"></a>-}</span>
<a name="line-4895"></a>
<a name="line-4896"></a><a name="mod"></a><span class='hs-comment'>-- | Returns element-wise remainder of division.</span>
<a name="line-4897"></a><span class='hs-comment'>--</span>
<a name="line-4898"></a><span class='hs-comment'>-- *NOTE*: `Mod` supports broadcasting. More about broadcasting</span>
<a name="line-4899"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-4900"></a><span class='hs-definition'>mod</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-4901"></a>                                               <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4902"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4903"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-4904"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4905"></a><span class='hs-definition'>mod</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4906"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Mod"</span>
<a name="line-4907"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4908"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-4909"></a><span class='hs-comment'>{-
<a name="line-4910"></a>attr {
<a name="line-4911"></a>  allowed_values {
<a name="line-4912"></a>    list {
<a name="line-4913"></a>      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
<a name="line-4914"></a>    }
<a name="line-4915"></a>  }
<a name="line-4916"></a>  name: "T"
<a name="line-4917"></a>  type: "type"
<a name="line-4918"></a>}
<a name="line-4919"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-4920"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-4921"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-4922"></a>-}</span>
<a name="line-4923"></a>
<a name="line-4924"></a><a name="minimum"></a><span class='hs-comment'>-- | Returns the min of x and y (i.e. x &lt; y ? x : y) element-wise.</span>
<a name="line-4925"></a><span class='hs-comment'>--</span>
<a name="line-4926"></a><span class='hs-comment'>-- *NOTE*: `Minimum` supports broadcasting. More about broadcasting</span>
<a name="line-4927"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-4928"></a><span class='hs-definition'>minimum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4929"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-4930"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4931"></a>                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4932"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4933"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-4934"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4935"></a><span class='hs-definition'>minimum</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4936"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Minimum"</span>
<a name="line-4937"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4938"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-4939"></a><span class='hs-comment'>{-
<a name="line-4940"></a>attr {
<a name="line-4941"></a>  allowed_values {
<a name="line-4942"></a>    list {
<a name="line-4943"></a>      type: DT_HALF
<a name="line-4944"></a>      type: DT_FLOAT
<a name="line-4945"></a>      type: DT_DOUBLE
<a name="line-4946"></a>      type: DT_INT32
<a name="line-4947"></a>      type: DT_INT64
<a name="line-4948"></a>    }
<a name="line-4949"></a>  }
<a name="line-4950"></a>  name: "T"
<a name="line-4951"></a>  type: "type"
<a name="line-4952"></a>}
<a name="line-4953"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-4954"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-4955"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-4956"></a>-}</span>
<a name="line-4957"></a>
<a name="line-4958"></a><a name="maximum"></a><span class='hs-comment'>-- | Returns the max of x and y (i.e. x &gt; y ? x : y) element-wise.</span>
<a name="line-4959"></a><span class='hs-comment'>--</span>
<a name="line-4960"></a><span class='hs-comment'>-- *NOTE*: `Maximum` supports broadcasting. More about broadcasting</span>
<a name="line-4961"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-4962"></a><span class='hs-definition'>maximum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-4963"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-4964"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-4965"></a>                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-4966"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-4967"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-4968"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-4969"></a><span class='hs-definition'>maximum</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-4970"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Maximum"</span>
<a name="line-4971"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-4972"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-4973"></a><span class='hs-comment'>{-
<a name="line-4974"></a>attr {
<a name="line-4975"></a>  allowed_values {
<a name="line-4976"></a>    list {
<a name="line-4977"></a>      type: DT_HALF
<a name="line-4978"></a>      type: DT_FLOAT
<a name="line-4979"></a>      type: DT_DOUBLE
<a name="line-4980"></a>      type: DT_INT32
<a name="line-4981"></a>      type: DT_INT64
<a name="line-4982"></a>    }
<a name="line-4983"></a>  }
<a name="line-4984"></a>  name: "T"
<a name="line-4985"></a>  type: "type"
<a name="line-4986"></a>}
<a name="line-4987"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-4988"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-4989"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-4990"></a>-}</span>
<a name="line-4991"></a>
<a name="line-4992"></a><a name="squaredDifference"></a><span class='hs-comment'>-- | Returns (x - y)(x - y) element-wise.</span>
<a name="line-4993"></a><span class='hs-comment'>--</span>
<a name="line-4994"></a><span class='hs-comment'>-- *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting</span>
<a name="line-4995"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-4996"></a><span class='hs-definition'>squaredDifference</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-4997"></a>                                       <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4998"></a>                                               <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-4999"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-5000"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5001"></a>                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5002"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5003"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5004"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-5005"></a><span class='hs-definition'>squaredDifference</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5006"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SquaredDifference"</span>
<a name="line-5007"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5008"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-5009"></a><span class='hs-comment'>{-
<a name="line-5010"></a>attr {
<a name="line-5011"></a>  allowed_values {
<a name="line-5012"></a>    list {
<a name="line-5013"></a>      type: DT_HALF
<a name="line-5014"></a>      type: DT_FLOAT
<a name="line-5015"></a>      type: DT_DOUBLE
<a name="line-5016"></a>      type: DT_INT32
<a name="line-5017"></a>      type: DT_INT64
<a name="line-5018"></a>      type: DT_COMPLEX64
<a name="line-5019"></a>      type: DT_COMPLEX128
<a name="line-5020"></a>    }
<a name="line-5021"></a>  }
<a name="line-5022"></a>  name: "T"
<a name="line-5023"></a>  type: "type"
<a name="line-5024"></a>}
<a name="line-5025"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5026"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-5027"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-5028"></a>-}</span>
<a name="line-5029"></a>
<a name="line-5030"></a><span class='hs-comment'>-- | Computes softplus gradients for a softplus operation.</span>
<a name="line-5031"></a>
<a name="line-5032"></a><a name="softplusGrad"></a><span class='hs-definition'>softplusGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-5033"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-5034"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-5035"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-5036"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-5037"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5038"></a>                                                        <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5039"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gradients__: The backpropagated gradients to the corresponding softplus operation.</span>
<a name="line-5040"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__: The features passed as input to the corresponding softplus operation.</span>
<a name="line-5041"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __backprops__: The gradients: `gradients / (1 + exp(-features))`.</span>
<a name="line-5042"></a><span class='hs-definition'>softplusGrad</span> <span class='hs-varid'>gradients</span> <span class='hs-varid'>features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5043"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SoftplusGrad"</span>
<a name="line-5044"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5045"></a>        <span class='hs-varid'>gradients</span> <span class='hs-varid'>features</span>
<a name="line-5046"></a><span class='hs-comment'>{-
<a name="line-5047"></a>attr {
<a name="line-5048"></a>  allowed_values {
<a name="line-5049"></a>    list {
<a name="line-5050"></a>      type: DT_FLOAT
<a name="line-5051"></a>      type: DT_DOUBLE
<a name="line-5052"></a>      type: DT_INT32
<a name="line-5053"></a>      type: DT_INT64
<a name="line-5054"></a>      type: DT_UINT8
<a name="line-5055"></a>      type: DT_INT16
<a name="line-5056"></a>      type: DT_INT8
<a name="line-5057"></a>      type: DT_UINT16
<a name="line-5058"></a>      type: DT_HALF
<a name="line-5059"></a>    }
<a name="line-5060"></a>  }
<a name="line-5061"></a>  name: "T"
<a name="line-5062"></a>  type: "type"
<a name="line-5063"></a>}
<a name="line-5064"></a>input_arg {
<a name="line-5065"></a>  description: "The backpropagated gradients to the corresponding softplus operation."
<a name="line-5066"></a>  name: "gradients"
<a name="line-5067"></a>  type_attr: "T"
<a name="line-5068"></a>}
<a name="line-5069"></a>input_arg {
<a name="line-5070"></a>  description: "The features passed as input to the corresponding softplus operation."
<a name="line-5071"></a>  name: "features"
<a name="line-5072"></a>  type_attr: "T"
<a name="line-5073"></a>}
<a name="line-5074"></a>output_arg {
<a name="line-5075"></a>  description: "The gradients: `gradients / (1 + exp(-features))`."
<a name="line-5076"></a>  name: "backprops"
<a name="line-5077"></a>  type_attr: "T"
<a name="line-5078"></a>}
<a name="line-5079"></a>-}</span>
<a name="line-5080"></a>
<a name="line-5081"></a><a name="batchToSpace"></a><span class='hs-comment'>-- | BatchToSpace for 4-D tensors of type T.</span>
<a name="line-5082"></a><span class='hs-comment'>--</span>
<a name="line-5083"></a><span class='hs-comment'>-- This is a legacy version of the more general BatchToSpaceND.</span>
<a name="line-5084"></a><span class='hs-comment'>-- </span>
<a name="line-5085"></a><span class='hs-comment'>-- Rearranges (permutes) data from batch into blocks of spatial data, followed by</span>
<a name="line-5086"></a><span class='hs-comment'>-- cropping. This is the reverse transformation of SpaceToBatch. More specifically,</span>
<a name="line-5087"></a><span class='hs-comment'>-- this op outputs a copy of the input tensor where values from the `batch`</span>
<a name="line-5088"></a><span class='hs-comment'>-- dimension are moved in spatial blocks to the `height` and `width` dimensions,</span>
<a name="line-5089"></a><span class='hs-comment'>-- followed by cropping along the `height` and `width` dimensions.</span>
<a name="line-5090"></a><span class='hs-definition'>batchToSpace</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-5091"></a>                                       <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-5092"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5093"></a>                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __block_size__</span>
<a name="line-5094"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D tensor with shape</span>
<a name="line-5095"></a>                               <span class='hs-comment'>-- `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,</span>
<a name="line-5096"></a>                               <span class='hs-comment'>--   depth]`. Note that the batch size of the input tensor must be divisible by</span>
<a name="line-5097"></a>                               <span class='hs-comment'>-- `block_size * block_size`.</span>
<a name="line-5098"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __crops__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies</span>
<a name="line-5099"></a>                                  <span class='hs-comment'>-- how many elements to crop from the intermediate result across the spatial</span>
<a name="line-5100"></a>                                  <span class='hs-comment'>-- dimensions as follows:</span>
<a name="line-5101"></a>                                  <span class='hs-comment'>-- </span>
<a name="line-5102"></a>                                  <span class='hs-comment'>--     crops = [[crop_top, crop_bottom], [crop_left, crop_right]]</span>
<a name="line-5103"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D with shape `[batch, height, width, depth]`, where:</span>
<a name="line-5104"></a>                <span class='hs-comment'>-- </span>
<a name="line-5105"></a>                <span class='hs-comment'>--       height = height_pad - crop_top - crop_bottom</span>
<a name="line-5106"></a>                <span class='hs-comment'>--       width = width_pad - crop_left - crop_right</span>
<a name="line-5107"></a>                <span class='hs-comment'>-- </span>
<a name="line-5108"></a>                <span class='hs-comment'>-- The attr `block_size` must be greater than one. It indicates the block size.</span>
<a name="line-5109"></a>                <span class='hs-comment'>-- </span>
<a name="line-5110"></a>                <span class='hs-comment'>-- Some examples:</span>
<a name="line-5111"></a>                <span class='hs-comment'>-- </span>
<a name="line-5112"></a>                <span class='hs-comment'>-- (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:</span>
<a name="line-5113"></a>                <span class='hs-comment'>-- </span>
<a name="line-5114"></a>                <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-5115"></a>                <span class='hs-comment'>-- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]</span>
<a name="line-5116"></a>                <span class='hs-comment'>-- ```</span>
<a name="line-5117"></a>                <span class='hs-comment'>-- </span>
<a name="line-5118"></a>                <span class='hs-comment'>-- The output tensor has shape `[1, 2, 2, 1]` and value:</span>
<a name="line-5119"></a>                <span class='hs-comment'>-- </span>
<a name="line-5120"></a>                <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-5121"></a>                <span class='hs-comment'>-- x = [[[[1], [2]], [[3], [4]]]]</span>
<a name="line-5122"></a>                <span class='hs-comment'>-- ```</span>
<a name="line-5123"></a>                <span class='hs-comment'>-- </span>
<a name="line-5124"></a>                <span class='hs-comment'>-- (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:</span>
<a name="line-5125"></a>                <span class='hs-comment'>-- </span>
<a name="line-5126"></a>                <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-5127"></a>                <span class='hs-comment'>-- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]</span>
<a name="line-5128"></a>                <span class='hs-comment'>-- ```</span>
<a name="line-5129"></a>                <span class='hs-comment'>-- </span>
<a name="line-5130"></a>                <span class='hs-comment'>-- The output tensor has shape `[1, 2, 2, 3]` and value:</span>
<a name="line-5131"></a>                <span class='hs-comment'>-- </span>
<a name="line-5132"></a>                <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-5133"></a>                <span class='hs-comment'>-- x = [[[[1, 2, 3], [4, 5, 6]],</span>
<a name="line-5134"></a>                <span class='hs-comment'>--       [[7, 8, 9], [10, 11, 12]]]]</span>
<a name="line-5135"></a>                <span class='hs-comment'>-- ```</span>
<a name="line-5136"></a>                <span class='hs-comment'>-- </span>
<a name="line-5137"></a>                <span class='hs-comment'>-- (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:</span>
<a name="line-5138"></a>                <span class='hs-comment'>-- </span>
<a name="line-5139"></a>                <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-5140"></a>                <span class='hs-comment'>-- x = [[[[1], [3]], [[5], [7]]],</span>
<a name="line-5141"></a>                <span class='hs-comment'>--      [[[2], [4]], [[10], [12]]],</span>
<a name="line-5142"></a>                <span class='hs-comment'>--      [[[5], [7]], [[13], [15]]],</span>
<a name="line-5143"></a>                <span class='hs-comment'>--      [[[6], [8]], [[14], [16]]]]</span>
<a name="line-5144"></a>                <span class='hs-comment'>-- ```</span>
<a name="line-5145"></a>                <span class='hs-comment'>-- </span>
<a name="line-5146"></a>                <span class='hs-comment'>-- The output tensor has shape `[1, 4, 4, 1]` and value:</span>
<a name="line-5147"></a>                <span class='hs-comment'>-- </span>
<a name="line-5148"></a>                <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-5149"></a>                <span class='hs-comment'>-- x = [[[1],   [2],  [3],  [4]],</span>
<a name="line-5150"></a>                <span class='hs-comment'>--      [[5],   [6],  [7],  [8]],</span>
<a name="line-5151"></a>                <span class='hs-comment'>--      [[9],  [10], [11],  [12]],</span>
<a name="line-5152"></a>                <span class='hs-comment'>--      [[13], [14], [15],  [16]]]</span>
<a name="line-5153"></a>                <span class='hs-comment'>-- ```</span>
<a name="line-5154"></a>                <span class='hs-comment'>-- </span>
<a name="line-5155"></a>                <span class='hs-comment'>-- (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:</span>
<a name="line-5156"></a>                <span class='hs-comment'>-- </span>
<a name="line-5157"></a>                <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-5158"></a>                <span class='hs-comment'>-- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],</span>
<a name="line-5159"></a>                <span class='hs-comment'>--      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]</span>
<a name="line-5160"></a>                <span class='hs-comment'>-- ```</span>
<a name="line-5161"></a>                <span class='hs-comment'>-- </span>
<a name="line-5162"></a>                <span class='hs-comment'>-- The output tensor has shape `[2, 2, 4, 1]` and value:</span>
<a name="line-5163"></a>                <span class='hs-comment'>-- </span>
<a name="line-5164"></a>                <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-5165"></a>                <span class='hs-comment'>-- x = [[[[1], [3]], [[5], [7]]],</span>
<a name="line-5166"></a>                <span class='hs-comment'>--      [[[2], [4]], [[10], [12]]],</span>
<a name="line-5167"></a>                <span class='hs-comment'>--      [[[5], [7]], [[13], [15]]],</span>
<a name="line-5168"></a>                <span class='hs-comment'>--      [[[6], [8]], [[14], [16]]]]</span>
<a name="line-5169"></a>                <span class='hs-comment'>-- ```</span>
<a name="line-5170"></a><span class='hs-definition'>batchToSpace</span> <span class='hs-varid'>block_size</span> <span class='hs-varid'>input</span> <span class='hs-varid'>crops</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5171"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchToSpace"</span>
<a name="line-5172"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-5173"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span>
<a name="line-5174"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"block_size"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>block_size</span><span class='hs-layout'>)</span>
<a name="line-5175"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>crops</span>
<a name="line-5176"></a><span class='hs-comment'>{-
<a name="line-5177"></a>attr { name: "T" type: "type" }
<a name="line-5178"></a>attr {
<a name="line-5179"></a>  has_minimum: true minimum: 2 name: "block_size" type: "int"
<a name="line-5180"></a>}
<a name="line-5181"></a>attr {
<a name="line-5182"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-5183"></a>  default_value { type: DT_INT32 }
<a name="line-5184"></a>  name: "Tidx"
<a name="line-5185"></a>  type: "type"
<a name="line-5186"></a>}
<a name="line-5187"></a>input_arg {
<a name="line-5188"></a>  description: "4-D tensor with shape\n`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n  depth]`. Note that the batch size of the input tensor must be divisible by\n`block_size * block_size`."
<a name="line-5189"></a>  name: "input"
<a name="line-5190"></a>  type_attr: "T"
<a name="line-5191"></a>}
<a name="line-5192"></a>input_arg {
<a name="line-5193"></a>  description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\nhow many elements to crop from the intermediate result across the spatial\ndimensions as follows:\n\n    crops = [[crop_top, crop_bottom], [crop_left, crop_right]]"
<a name="line-5194"></a>  name: "crops"
<a name="line-5195"></a>  type_attr: "Tidx"
<a name="line-5196"></a>}
<a name="line-5197"></a>output_arg {
<a name="line-5198"></a>  description: "4-D with shape `[batch, height, width, depth]`, where:\n\n      height = height_pad - crop_top - crop_bottom\n      width = width_pad - crop_left - crop_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```prettyprint\nx = [[[1],   [2],  [3],  [4]],\n     [[5],   [6],  [7],  [8]],\n     [[9],  [10], [11],  [12]],\n     [[13], [14], [15],  [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```"
<a name="line-5199"></a>  name: "output"
<a name="line-5200"></a>  type_attr: "T"
<a name="line-5201"></a>}
<a name="line-5202"></a>-}</span>
<a name="line-5203"></a>
<a name="line-5204"></a><a name="mul"></a><span class='hs-comment'>-- | Returns x * y element-wise.</span>
<a name="line-5205"></a><span class='hs-comment'>--</span>
<a name="line-5206"></a><span class='hs-comment'>-- *NOTE*: `Mul` supports broadcasting. More about broadcasting</span>
<a name="line-5207"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-5208"></a><span class='hs-definition'>mul</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5209"></a>                                               <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5210"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-5211"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-5212"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-5213"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5214"></a>                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5215"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5216"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5217"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-5218"></a><span class='hs-definition'>mul</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5219"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Mul"</span>
<a name="line-5220"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5221"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-5222"></a><span class='hs-comment'>{-
<a name="line-5223"></a>attr {
<a name="line-5224"></a>  allowed_values {
<a name="line-5225"></a>    list {
<a name="line-5226"></a>      type: DT_HALF
<a name="line-5227"></a>      type: DT_FLOAT
<a name="line-5228"></a>      type: DT_DOUBLE
<a name="line-5229"></a>      type: DT_UINT8
<a name="line-5230"></a>      type: DT_INT8
<a name="line-5231"></a>      type: DT_UINT16
<a name="line-5232"></a>      type: DT_INT16
<a name="line-5233"></a>      type: DT_INT32
<a name="line-5234"></a>      type: DT_INT64
<a name="line-5235"></a>      type: DT_COMPLEX64
<a name="line-5236"></a>      type: DT_COMPLEX128
<a name="line-5237"></a>    }
<a name="line-5238"></a>  }
<a name="line-5239"></a>  name: "T"
<a name="line-5240"></a>  type: "type"
<a name="line-5241"></a>}
<a name="line-5242"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5243"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-5244"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-5245"></a>-}</span>
<a name="line-5246"></a>
<a name="line-5247"></a><a name="rint"></a><span class='hs-comment'>-- | Returns element-wise integer closest to x.</span>
<a name="line-5248"></a><span class='hs-comment'>--</span>
<a name="line-5249"></a><span class='hs-comment'>-- If the result is midway between two representable values,</span>
<a name="line-5250"></a><span class='hs-comment'>-- the even representable is chosen.</span>
<a name="line-5251"></a><span class='hs-comment'>-- For example:</span>
<a name="line-5252"></a><span class='hs-comment'>-- </span>
<a name="line-5253"></a><span class='hs-comment'>-- ```</span>
<a name="line-5254"></a><span class='hs-comment'>-- rint(-1.5) ==&gt; -2.0</span>
<a name="line-5255"></a><span class='hs-comment'>-- rint(0.5000001) ==&gt; 1.0</span>
<a name="line-5256"></a><span class='hs-comment'>-- rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==&gt; [-2., -2., -0., 0., 2., 2., 2.]</span>
<a name="line-5257"></a><span class='hs-comment'>-- ```</span>
<a name="line-5258"></a><span class='hs-definition'>rint</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5259"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5260"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5261"></a><span class='hs-definition'>rint</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5262"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Rint"</span>
<a name="line-5263"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5264"></a>        <span class='hs-varid'>x</span>
<a name="line-5265"></a><span class='hs-comment'>{-
<a name="line-5266"></a>attr {
<a name="line-5267"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-5268"></a>  name: "T"
<a name="line-5269"></a>  type: "type"
<a name="line-5270"></a>}
<a name="line-5271"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5272"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5273"></a>-}</span>
<a name="line-5274"></a>
<a name="line-5275"></a><span class='hs-comment'>-- | Returns element-wise smallest integer in not less than x.</span>
<a name="line-5276"></a>
<a name="line-5277"></a><a name="ceil"></a><span class='hs-definition'>ceil</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5278"></a>                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5279"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5280"></a><span class='hs-definition'>ceil</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5281"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Ceil"</span>
<a name="line-5282"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5283"></a>        <span class='hs-varid'>x</span>
<a name="line-5284"></a><span class='hs-comment'>{-
<a name="line-5285"></a>attr {
<a name="line-5286"></a>  allowed_values {
<a name="line-5287"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-5288"></a>  }
<a name="line-5289"></a>  name: "T"
<a name="line-5290"></a>  type: "type"
<a name="line-5291"></a>}
<a name="line-5292"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5293"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5294"></a>-}</span>
<a name="line-5295"></a>
<a name="line-5296"></a><span class='hs-comment'>-- | Returns element-wise largest integer not greater than x.</span>
<a name="line-5297"></a>
<a name="line-5298"></a><a name="floor"></a><span class='hs-definition'>floor</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5299"></a>                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5300"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5301"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5302"></a><span class='hs-definition'>floor</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5303"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Floor"</span>
<a name="line-5304"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5305"></a>        <span class='hs-varid'>x</span>
<a name="line-5306"></a><span class='hs-comment'>{-
<a name="line-5307"></a>attr {
<a name="line-5308"></a>  allowed_values {
<a name="line-5309"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-5310"></a>  }
<a name="line-5311"></a>  name: "T"
<a name="line-5312"></a>  type: "type"
<a name="line-5313"></a>}
<a name="line-5314"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5315"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5316"></a>-}</span>
<a name="line-5317"></a>
<a name="line-5318"></a><span class='hs-comment'>-- | Performs 3D max pooling on the input.</span>
<a name="line-5319"></a>
<a name="line-5320"></a><a name="maxPool3D"></a><span class='hs-definition'>maxPool3D</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5321"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5322"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-5323"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-5324"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-5325"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-5326"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5327"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5328"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.</span>
<a name="line-5329"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The max pooled output tensor.</span>
<a name="line-5330"></a><span class='hs-definition'>maxPool3D</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5331"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MaxPool3D"</span>
<a name="line-5332"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5333"></a>        <span class='hs-varid'>input</span>
<a name="line-5334"></a><span class='hs-comment'>{-
<a name="line-5335"></a>attr {
<a name="line-5336"></a>  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
<a name="line-5337"></a>  has_minimum: true
<a name="line-5338"></a>  minimum: 5
<a name="line-5339"></a>  name: "ksize"
<a name="line-5340"></a>  type: "list(int)"
<a name="line-5341"></a>}
<a name="line-5342"></a>attr {
<a name="line-5343"></a>  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
<a name="line-5344"></a>  has_minimum: true
<a name="line-5345"></a>  minimum: 5
<a name="line-5346"></a>  name: "strides"
<a name="line-5347"></a>  type: "list(int)"
<a name="line-5348"></a>}
<a name="line-5349"></a>attr {
<a name="line-5350"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-5351"></a>  description: "The type of padding algorithm to use."
<a name="line-5352"></a>  name: "padding"
<a name="line-5353"></a>  type: "string"
<a name="line-5354"></a>}
<a name="line-5355"></a>attr {
<a name="line-5356"></a>  allowed_values {
<a name="line-5357"></a>    list {
<a name="line-5358"></a>      type: DT_FLOAT
<a name="line-5359"></a>      type: DT_DOUBLE
<a name="line-5360"></a>      type: DT_INT64
<a name="line-5361"></a>      type: DT_INT32
<a name="line-5362"></a>      type: DT_UINT8
<a name="line-5363"></a>      type: DT_UINT16
<a name="line-5364"></a>      type: DT_INT16
<a name="line-5365"></a>      type: DT_INT8
<a name="line-5366"></a>      type: DT_COMPLEX64
<a name="line-5367"></a>      type: DT_COMPLEX128
<a name="line-5368"></a>      type: DT_QINT8
<a name="line-5369"></a>      type: DT_QUINT8
<a name="line-5370"></a>      type: DT_QINT32
<a name="line-5371"></a>      type: DT_HALF
<a name="line-5372"></a>    }
<a name="line-5373"></a>  }
<a name="line-5374"></a>  name: "T"
<a name="line-5375"></a>  type: "type"
<a name="line-5376"></a>}
<a name="line-5377"></a>input_arg {
<a name="line-5378"></a>  description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over."
<a name="line-5379"></a>  name: "input"
<a name="line-5380"></a>  type_attr: "T"
<a name="line-5381"></a>}
<a name="line-5382"></a>output_arg {
<a name="line-5383"></a>  description: "The max pooled output tensor."
<a name="line-5384"></a>  name: "output"
<a name="line-5385"></a>  type_attr: "T"
<a name="line-5386"></a>}
<a name="line-5387"></a>-}</span>
<a name="line-5388"></a>
<a name="line-5389"></a><a name="isInf"></a><span class='hs-comment'>-- | Returns which elements of x are Inf.</span>
<a name="line-5390"></a><span class='hs-comment'>--</span>
<a name="line-5391"></a><span class='hs-comment'>-- @compatibility(numpy)</span>
<a name="line-5392"></a><span class='hs-comment'>-- Equivalent to np.isinf</span>
<a name="line-5393"></a><span class='hs-comment'>-- @end_compatibility</span>
<a name="line-5394"></a><span class='hs-definition'>isInf</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5395"></a>                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5396"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5397"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5398"></a><span class='hs-definition'>isInf</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5399"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"IsInf"</span>
<a name="line-5400"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5401"></a>        <span class='hs-varid'>x</span>
<a name="line-5402"></a><span class='hs-comment'>{-
<a name="line-5403"></a>attr {
<a name="line-5404"></a>  allowed_values {
<a name="line-5405"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-5406"></a>  }
<a name="line-5407"></a>  name: "T"
<a name="line-5408"></a>  type: "type"
<a name="line-5409"></a>}
<a name="line-5410"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5411"></a>output_arg { name: "y" type: DT_BOOL }
<a name="line-5412"></a>-}</span>
<a name="line-5413"></a>
<a name="line-5414"></a><span class='hs-comment'>-- | Computes the gradients of depthwise convolution with respect to the input.</span>
<a name="line-5415"></a>
<a name="line-5416"></a><a name="depthwiseConv2dNativeBackpropInput"></a><span class='hs-definition'>depthwiseConv2dNativeBackpropInput</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-5417"></a>                                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5418"></a>                                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5419"></a>                                      <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __input_sizes__: An integer vector representing the shape of `input`,</span>
<a name="line-5420"></a>                                                               <span class='hs-comment'>-- where `input` is a 4-D `[batch, height, width, channels]` tensor.</span>
<a name="line-5421"></a>                                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: 4-D with shape</span>
<a name="line-5422"></a>                                                     <span class='hs-comment'>-- `[filter_height, filter_width, in_channels, depthwise_multiplier]`.</span>
<a name="line-5423"></a>                                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.</span>
<a name="line-5424"></a>                                                     <span class='hs-comment'>-- Gradients w.r.t. the output of the convolution.</span>
<a name="line-5425"></a>                                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient</span>
<a name="line-5426"></a>                                      <span class='hs-comment'>-- w.r.t. the input of the convolution.</span>
<a name="line-5427"></a><span class='hs-definition'>depthwiseConv2dNativeBackpropInput</span> <span class='hs-varid'>input_sizes</span> <span class='hs-varid'>filter</span>
<a name="line-5428"></a>                                   <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5429"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DepthwiseConv2dNativeBackpropInput"</span>
<a name="line-5430"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5431"></a>        <span class='hs-varid'>input_sizes</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span>
<a name="line-5432"></a><span class='hs-comment'>{-
<a name="line-5433"></a>attr {
<a name="line-5434"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-5435"></a>  name: "T"
<a name="line-5436"></a>  type: "type"
<a name="line-5437"></a>}
<a name="line-5438"></a>attr {
<a name="line-5439"></a>  description: "The stride of the sliding window for each dimension of the input\nof the convolution."
<a name="line-5440"></a>  name: "strides"
<a name="line-5441"></a>  type: "list(int)"
<a name="line-5442"></a>}
<a name="line-5443"></a>attr {
<a name="line-5444"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-5445"></a>  description: "The type of padding algorithm to use."
<a name="line-5446"></a>  name: "padding"
<a name="line-5447"></a>  type: "string"
<a name="line-5448"></a>}
<a name="line-5449"></a>input_arg {
<a name="line-5450"></a>  description: "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor."
<a name="line-5451"></a>  name: "input_sizes"
<a name="line-5452"></a>  type: DT_INT32
<a name="line-5453"></a>}
<a name="line-5454"></a>input_arg {
<a name="line-5455"></a>  description: "4-D with shape\n`[filter_height, filter_width, in_channels, depthwise_multiplier]`."
<a name="line-5456"></a>  name: "filter"
<a name="line-5457"></a>  type_attr: "T"
<a name="line-5458"></a>}
<a name="line-5459"></a>input_arg {
<a name="line-5460"></a>  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
<a name="line-5461"></a>  name: "out_backprop"
<a name="line-5462"></a>  type_attr: "T"
<a name="line-5463"></a>}
<a name="line-5464"></a>output_arg {
<a name="line-5465"></a>  description: "4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient\nw.r.t. the input of the convolution."
<a name="line-5466"></a>  name: "output"
<a name="line-5467"></a>  type_attr: "T"
<a name="line-5468"></a>}
<a name="line-5469"></a>-}</span>
<a name="line-5470"></a>
<a name="line-5471"></a><a name="isNan"></a><span class='hs-comment'>-- | Returns which elements of x are NaN.</span>
<a name="line-5472"></a><span class='hs-comment'>--</span>
<a name="line-5473"></a><span class='hs-comment'>-- @compatibility(numpy)</span>
<a name="line-5474"></a><span class='hs-comment'>-- Equivalent to np.isnan</span>
<a name="line-5475"></a><span class='hs-comment'>-- @end_compatibility</span>
<a name="line-5476"></a><span class='hs-definition'>isNan</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5477"></a>                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5478"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5479"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5480"></a><span class='hs-definition'>isNan</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5481"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"IsNan"</span>
<a name="line-5482"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5483"></a>        <span class='hs-varid'>x</span>
<a name="line-5484"></a><span class='hs-comment'>{-
<a name="line-5485"></a>attr {
<a name="line-5486"></a>  allowed_values {
<a name="line-5487"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-5488"></a>  }
<a name="line-5489"></a>  name: "T"
<a name="line-5490"></a>  type: "type"
<a name="line-5491"></a>}
<a name="line-5492"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5493"></a>output_arg { name: "y" type: DT_BOOL }
<a name="line-5494"></a>-}</span>
<a name="line-5495"></a>
<a name="line-5496"></a><a name="log1p"></a><span class='hs-comment'>-- | Computes natural logarithm of (1 + x) element-wise.</span>
<a name="line-5497"></a><span class='hs-comment'>--</span>
<a name="line-5498"></a><span class='hs-comment'>-- I.e., \\(y = \log_e (1 + x)\\).</span>
<a name="line-5499"></a><span class='hs-definition'>log1p</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5500"></a>                                              <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5501"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5502"></a>                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5503"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5504"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5505"></a><span class='hs-definition'>log1p</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5506"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Log1p"</span>
<a name="line-5507"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5508"></a>        <span class='hs-varid'>x</span>
<a name="line-5509"></a><span class='hs-comment'>{-
<a name="line-5510"></a>attr {
<a name="line-5511"></a>  allowed_values {
<a name="line-5512"></a>    list {
<a name="line-5513"></a>      type: DT_HALF
<a name="line-5514"></a>      type: DT_FLOAT
<a name="line-5515"></a>      type: DT_DOUBLE
<a name="line-5516"></a>      type: DT_COMPLEX64
<a name="line-5517"></a>      type: DT_COMPLEX128
<a name="line-5518"></a>    }
<a name="line-5519"></a>  }
<a name="line-5520"></a>  name: "T"
<a name="line-5521"></a>  type: "type"
<a name="line-5522"></a>}
<a name="line-5523"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5524"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5525"></a>-}</span>
<a name="line-5526"></a>
<a name="line-5527"></a><span class='hs-comment'>-- | Computes asin of x element-wise.</span>
<a name="line-5528"></a>
<a name="line-5529"></a><a name="asin"></a><span class='hs-definition'>asin</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5530"></a>                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5531"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-5532"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5533"></a>                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5534"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5535"></a><span class='hs-definition'>asin</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5536"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Asin"</span>
<a name="line-5537"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5538"></a>        <span class='hs-varid'>x</span>
<a name="line-5539"></a><span class='hs-comment'>{-
<a name="line-5540"></a>attr {
<a name="line-5541"></a>  allowed_values {
<a name="line-5542"></a>    list {
<a name="line-5543"></a>      type: DT_HALF
<a name="line-5544"></a>      type: DT_FLOAT
<a name="line-5545"></a>      type: DT_DOUBLE
<a name="line-5546"></a>      type: DT_INT32
<a name="line-5547"></a>      type: DT_INT64
<a name="line-5548"></a>      type: DT_COMPLEX64
<a name="line-5549"></a>      type: DT_COMPLEX128
<a name="line-5550"></a>    }
<a name="line-5551"></a>  }
<a name="line-5552"></a>  name: "T"
<a name="line-5553"></a>  type: "type"
<a name="line-5554"></a>}
<a name="line-5555"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5556"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5557"></a>-}</span>
<a name="line-5558"></a>
<a name="line-5559"></a><a name="topKV2"></a><span class='hs-comment'>-- | Finds values and indices of the `k` largest elements for the last dimension.</span>
<a name="line-5560"></a><span class='hs-comment'>--</span>
<a name="line-5561"></a><span class='hs-comment'>-- If the input is a vector (rank-1), finds the `k` largest entries in the vector</span>
<a name="line-5562"></a><span class='hs-comment'>-- and outputs their values and indices as vectors.  Thus `values[j]` is the</span>
<a name="line-5563"></a><span class='hs-comment'>-- `j`-th largest entry in `input`, and its index is `indices[j]`.</span>
<a name="line-5564"></a><span class='hs-comment'>-- </span>
<a name="line-5565"></a><span class='hs-comment'>-- For matrices (resp. higher rank input), computes the top `k` entries in each</span>
<a name="line-5566"></a><span class='hs-comment'>-- row (resp. vector along the last dimension).  Thus,</span>
<a name="line-5567"></a><span class='hs-comment'>-- </span>
<a name="line-5568"></a><span class='hs-comment'>--     values.shape = indices.shape = input.shape[:-1] + [k]</span>
<a name="line-5569"></a><span class='hs-comment'>-- </span>
<a name="line-5570"></a><span class='hs-comment'>-- If two elements are equal, the lower-index element appears first.</span>
<a name="line-5571"></a><span class='hs-comment'>-- </span>
<a name="line-5572"></a><span class='hs-comment'>-- This is the same as `TopK`, but takes `k` as in input rather than an attr.</span>
<a name="line-5573"></a><span class='hs-definition'>topKV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-5574"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-5575"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-5576"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-5577"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5578"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5579"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 1-D or higher with last dimension at least `k`.</span>
<a name="line-5580"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __k__: 0-D.  Number of top elements to look for along the last dimension (along each</span>
<a name="line-5581"></a>                                      <span class='hs-comment'>-- row for matrices).</span>
<a name="line-5582"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>)</span>
<a name="line-5583"></a>          <span class='hs-comment'>-- ^ (__values__, __indices__)</span>
<a name="line-5584"></a>          <span class='hs-comment'>--</span>
<a name="line-5585"></a>          <span class='hs-comment'>-- * __values__: The `k` largest elements along each last dimensional slice.</span>
<a name="line-5586"></a>          <span class='hs-comment'>--</span>
<a name="line-5587"></a>          <span class='hs-comment'>-- * __indices__: The indices of `values` within the last dimension of `input`.</span>
<a name="line-5588"></a><span class='hs-definition'>topKV2</span> <span class='hs-varid'>input</span> <span class='hs-varid'>k</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5589"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TopKV2"</span>
<a name="line-5590"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5591"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>k</span>
<a name="line-5592"></a><span class='hs-comment'>{-
<a name="line-5593"></a>attr {
<a name="line-5594"></a>  default_value { b: true }
<a name="line-5595"></a>  description: "If true the resulting `k` elements will be sorted by the values in\ndescending order."
<a name="line-5596"></a>  name: "sorted"
<a name="line-5597"></a>  type: "bool"
<a name="line-5598"></a>}
<a name="line-5599"></a>attr {
<a name="line-5600"></a>  allowed_values {
<a name="line-5601"></a>    list {
<a name="line-5602"></a>      type: DT_FLOAT
<a name="line-5603"></a>      type: DT_DOUBLE
<a name="line-5604"></a>      type: DT_INT32
<a name="line-5605"></a>      type: DT_INT64
<a name="line-5606"></a>      type: DT_UINT8
<a name="line-5607"></a>      type: DT_INT16
<a name="line-5608"></a>      type: DT_INT8
<a name="line-5609"></a>      type: DT_UINT16
<a name="line-5610"></a>      type: DT_HALF
<a name="line-5611"></a>    }
<a name="line-5612"></a>  }
<a name="line-5613"></a>  name: "T"
<a name="line-5614"></a>  type: "type"
<a name="line-5615"></a>}
<a name="line-5616"></a>input_arg {
<a name="line-5617"></a>  description: "1-D or higher with last dimension at least `k`."
<a name="line-5618"></a>  name: "input"
<a name="line-5619"></a>  type_attr: "T"
<a name="line-5620"></a>}
<a name="line-5621"></a>input_arg {
<a name="line-5622"></a>  description: "0-D.  Number of top elements to look for along the last dimension (along each\nrow for matrices)."
<a name="line-5623"></a>  name: "k"
<a name="line-5624"></a>  type: DT_INT32
<a name="line-5625"></a>}
<a name="line-5626"></a>output_arg {
<a name="line-5627"></a>  description: "The `k` largest elements along each last dimensional slice."
<a name="line-5628"></a>  name: "values"
<a name="line-5629"></a>  type_attr: "T"
<a name="line-5630"></a>}
<a name="line-5631"></a>output_arg {
<a name="line-5632"></a>  description: "The indices of `values` within the last dimension of `input`."
<a name="line-5633"></a>  name: "indices"
<a name="line-5634"></a>  type: DT_INT32
<a name="line-5635"></a>}
<a name="line-5636"></a>-}</span>
<a name="line-5637"></a>
<a name="line-5638"></a><span class='hs-comment'>-- | Computes cos of x element-wise.</span>
<a name="line-5639"></a>
<a name="line-5640"></a><a name="cos"></a><span class='hs-definition'>cos</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5641"></a>                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5642"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5643"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5644"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5645"></a><span class='hs-definition'>cos</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5646"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Cos"</span>
<a name="line-5647"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5648"></a>        <span class='hs-varid'>x</span>
<a name="line-5649"></a><span class='hs-comment'>{-
<a name="line-5650"></a>attr {
<a name="line-5651"></a>  allowed_values {
<a name="line-5652"></a>    list {
<a name="line-5653"></a>      type: DT_HALF
<a name="line-5654"></a>      type: DT_FLOAT
<a name="line-5655"></a>      type: DT_DOUBLE
<a name="line-5656"></a>      type: DT_COMPLEX64
<a name="line-5657"></a>      type: DT_COMPLEX128
<a name="line-5658"></a>    }
<a name="line-5659"></a>  }
<a name="line-5660"></a>  name: "T"
<a name="line-5661"></a>  type: "type"
<a name="line-5662"></a>}
<a name="line-5663"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5664"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5665"></a>-}</span>
<a name="line-5666"></a>
<a name="line-5667"></a><span class='hs-comment'>-- | Computes sin of x element-wise.</span>
<a name="line-5668"></a>
<a name="line-5669"></a><a name="sin"></a><span class='hs-definition'>sin</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5670"></a>                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5671"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5672"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5673"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5674"></a><span class='hs-definition'>sin</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5675"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Sin"</span>
<a name="line-5676"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5677"></a>        <span class='hs-varid'>x</span>
<a name="line-5678"></a><span class='hs-comment'>{-
<a name="line-5679"></a>attr {
<a name="line-5680"></a>  allowed_values {
<a name="line-5681"></a>    list {
<a name="line-5682"></a>      type: DT_HALF
<a name="line-5683"></a>      type: DT_FLOAT
<a name="line-5684"></a>      type: DT_DOUBLE
<a name="line-5685"></a>      type: DT_COMPLEX64
<a name="line-5686"></a>      type: DT_COMPLEX128
<a name="line-5687"></a>    }
<a name="line-5688"></a>  }
<a name="line-5689"></a>  name: "T"
<a name="line-5690"></a>  type: "type"
<a name="line-5691"></a>}
<a name="line-5692"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5693"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5694"></a>-}</span>
<a name="line-5695"></a>
<a name="line-5696"></a><a name="randomUniformInt"></a><span class='hs-comment'>-- | Outputs random integers from a uniform distribution.</span>
<a name="line-5697"></a><span class='hs-comment'>--</span>
<a name="line-5698"></a><span class='hs-comment'>-- The generated values are uniform integers in the range `[minval, maxval)`.</span>
<a name="line-5699"></a><span class='hs-comment'>-- The lower bound `minval` is included in the range, while the upper bound</span>
<a name="line-5700"></a><span class='hs-comment'>-- `maxval` is excluded.</span>
<a name="line-5701"></a><span class='hs-comment'>-- </span>
<a name="line-5702"></a><span class='hs-comment'>-- The random integers are slightly biased unless `maxval - minval` is an exact</span>
<a name="line-5703"></a><span class='hs-comment'>-- power of two.  The bias is small for values of `maxval - minval` significantly</span>
<a name="line-5704"></a><span class='hs-comment'>-- smaller than the range of the output (either `2^32` or `2^64`).</span>
<a name="line-5705"></a><span class='hs-definition'>randomUniformInt</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tout</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tout</span><span class='hs-layout'>,</span>
<a name="line-5706"></a>                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-5707"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tout</span><span class='hs-layout'>,</span>
<a name="line-5708"></a>                                              <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-5709"></a>                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-5710"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5711"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __shape__: The shape of the output tensor.</span>
<a name="line-5712"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tout</span> <span class='hs-comment'>-- ^ __minval__: 0-D.  Inclusive lower bound on the generated integers.</span>
<a name="line-5713"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tout</span> <span class='hs-comment'>-- ^ __maxval__: 0-D.  Exclusive upper bound on the generated integers.</span>
<a name="line-5714"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A tensor of the specified shape filled with uniform random integers.</span>
<a name="line-5715"></a><span class='hs-definition'>randomUniformInt</span> <span class='hs-varid'>shape</span> <span class='hs-varid'>minval</span> <span class='hs-varid'>maxval</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5716"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RandomUniformInt"</span>
<a name="line-5717"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tout"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span>
<a name="line-5718"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5719"></a>        <span class='hs-varid'>shape</span> <span class='hs-varid'>minval</span> <span class='hs-varid'>maxval</span>
<a name="line-5720"></a><span class='hs-comment'>{-
<a name="line-5721"></a>attr {
<a name="line-5722"></a>  default_value { i: 0 }
<a name="line-5723"></a>  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-5724"></a>  name: "seed"
<a name="line-5725"></a>  type: "int"
<a name="line-5726"></a>}
<a name="line-5727"></a>attr {
<a name="line-5728"></a>  default_value { i: 0 }
<a name="line-5729"></a>  description: "A second seed to avoid seed collision."
<a name="line-5730"></a>  name: "seed2"
<a name="line-5731"></a>  type: "int"
<a name="line-5732"></a>}
<a name="line-5733"></a>attr {
<a name="line-5734"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-5735"></a>  name: "Tout"
<a name="line-5736"></a>  type: "type"
<a name="line-5737"></a>}
<a name="line-5738"></a>attr {
<a name="line-5739"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-5740"></a>  name: "T"
<a name="line-5741"></a>  type: "type"
<a name="line-5742"></a>}
<a name="line-5743"></a>input_arg {
<a name="line-5744"></a>  description: "The shape of the output tensor."
<a name="line-5745"></a>  name: "shape"
<a name="line-5746"></a>  type_attr: "T"
<a name="line-5747"></a>}
<a name="line-5748"></a>input_arg {
<a name="line-5749"></a>  description: "0-D.  Inclusive lower bound on the generated integers."
<a name="line-5750"></a>  name: "minval"
<a name="line-5751"></a>  type_attr: "Tout"
<a name="line-5752"></a>}
<a name="line-5753"></a>input_arg {
<a name="line-5754"></a>  description: "0-D.  Exclusive upper bound on the generated integers."
<a name="line-5755"></a>  name: "maxval"
<a name="line-5756"></a>  type_attr: "Tout"
<a name="line-5757"></a>}
<a name="line-5758"></a>output_arg {
<a name="line-5759"></a>  description: "A tensor of the specified shape filled with uniform random integers."
<a name="line-5760"></a>  name: "output"
<a name="line-5761"></a>  type_attr: "Tout"
<a name="line-5762"></a>}
<a name="line-5763"></a>-}</span>
<a name="line-5764"></a>
<a name="line-5765"></a><span class='hs-comment'>-- | Computes the complementary error function of `x` element-wise.</span>
<a name="line-5766"></a>
<a name="line-5767"></a><a name="erfc"></a><span class='hs-definition'>erfc</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5768"></a>                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5769"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5770"></a><span class='hs-definition'>erfc</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5771"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Erfc"</span>
<a name="line-5772"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5773"></a>        <span class='hs-varid'>x</span>
<a name="line-5774"></a><span class='hs-comment'>{-
<a name="line-5775"></a>attr {
<a name="line-5776"></a>  allowed_values {
<a name="line-5777"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-5778"></a>  }
<a name="line-5779"></a>  name: "T"
<a name="line-5780"></a>  type: "type"
<a name="line-5781"></a>}
<a name="line-5782"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5783"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5784"></a>-}</span>
<a name="line-5785"></a>
<a name="line-5786"></a><a name="digamma"></a><span class='hs-comment'>-- | Computes Psi, the derivative of Lgamma (the log of the absolute value of</span>
<a name="line-5787"></a><span class='hs-comment'>--</span>
<a name="line-5788"></a><span class='hs-comment'>-- `Gamma(x)`), element-wise.</span>
<a name="line-5789"></a><span class='hs-definition'>digamma</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5790"></a>                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5791"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5792"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5793"></a><span class='hs-definition'>digamma</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5794"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Digamma"</span>
<a name="line-5795"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5796"></a>        <span class='hs-varid'>x</span>
<a name="line-5797"></a><span class='hs-comment'>{-
<a name="line-5798"></a>attr {
<a name="line-5799"></a>  allowed_values {
<a name="line-5800"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-5801"></a>  }
<a name="line-5802"></a>  name: "T"
<a name="line-5803"></a>  type: "type"
<a name="line-5804"></a>}
<a name="line-5805"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5806"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5807"></a>-}</span>
<a name="line-5808"></a>
<a name="line-5809"></a><a name="fusedResizeAndPadConv2D"></a><span class='hs-comment'>-- | Performs a resize and padding as a preprocess during a convolution.</span>
<a name="line-5810"></a><span class='hs-comment'>--</span>
<a name="line-5811"></a><span class='hs-comment'>-- It's often possible to do spatial transformations more efficiently as part of</span>
<a name="line-5812"></a><span class='hs-comment'>-- the packing stage of a convolution, so this op allows for an optimized</span>
<a name="line-5813"></a><span class='hs-comment'>-- implementation where these stages are fused together. This prevents the need to</span>
<a name="line-5814"></a><span class='hs-comment'>-- write out the intermediate results as whole tensors, reducing memory pressure,</span>
<a name="line-5815"></a><span class='hs-comment'>-- and we can get some latency gains by merging the transformation calculations.</span>
<a name="line-5816"></a><span class='hs-comment'>-- The data_format attribute for Conv2D isn't supported by this op, and defaults to</span>
<a name="line-5817"></a><span class='hs-comment'>-- 'NHWC' order.</span>
<a name="line-5818"></a><span class='hs-comment'>-- Internally this op uses a single per-graph scratch buffer, which means that it</span>
<a name="line-5819"></a><span class='hs-comment'>-- will block if multiple versions are being run in parallel. This is because this</span>
<a name="line-5820"></a><span class='hs-comment'>-- operator is primarily an optimization to minimize memory usage.</span>
<a name="line-5821"></a><span class='hs-definition'>fusedResizeAndPadConv2D</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-5822"></a>                                                   <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-5823"></a>                                                           <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5824"></a>                           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.</span>
<a name="line-5825"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __size__: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The</span>
<a name="line-5826"></a>                                                       <span class='hs-comment'>-- new size for the images.</span>
<a name="line-5827"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of</span>
<a name="line-5828"></a>                                                       <span class='hs-comment'>-- rows must be the same as the rank of `input`.</span>
<a name="line-5829"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: 4-D with shape</span>
<a name="line-5830"></a>                                          <span class='hs-comment'>-- `[filter_height, filter_width, in_channels, out_channels]`.</span>
<a name="line-5831"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-5832"></a><span class='hs-definition'>fusedResizeAndPadConv2D</span> <span class='hs-varid'>input</span> <span class='hs-varid'>size</span> <span class='hs-varid'>paddings</span> <span class='hs-varid'>filter</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5833"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FusedResizeAndPadConv2D"</span>
<a name="line-5834"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5835"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>size</span> <span class='hs-varid'>paddings</span> <span class='hs-varid'>filter</span>
<a name="line-5836"></a><span class='hs-comment'>{-
<a name="line-5837"></a>attr {
<a name="line-5838"></a>  allowed_values {
<a name="line-5839"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-5840"></a>  }
<a name="line-5841"></a>  name: "T"
<a name="line-5842"></a>  type: "type"
<a name="line-5843"></a>}
<a name="line-5844"></a>attr {
<a name="line-5845"></a>  default_value { b: false }
<a name="line-5846"></a>  description: "If true, rescale input by (new_height - 1) / (height - 1),\nwhich exactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
<a name="line-5847"></a>  name: "resize_align_corners"
<a name="line-5848"></a>  type: "bool"
<a name="line-5849"></a>}
<a name="line-5850"></a>attr {
<a name="line-5851"></a>  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
<a name="line-5852"></a>  name: "mode"
<a name="line-5853"></a>  type: "string"
<a name="line-5854"></a>}
<a name="line-5855"></a>attr {
<a name="line-5856"></a>  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format."
<a name="line-5857"></a>  name: "strides"
<a name="line-5858"></a>  type: "list(int)"
<a name="line-5859"></a>}
<a name="line-5860"></a>attr {
<a name="line-5861"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-5862"></a>  description: "The type of padding algorithm to use."
<a name="line-5863"></a>  name: "padding"
<a name="line-5864"></a>  type: "string"
<a name="line-5865"></a>}
<a name="line-5866"></a>input_arg {
<a name="line-5867"></a>  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
<a name="line-5868"></a>  name: "input"
<a name="line-5869"></a>  type_attr: "T"
<a name="line-5870"></a>}
<a name="line-5871"></a>input_arg {
<a name="line-5872"></a>  description: "A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
<a name="line-5873"></a>  name: "size"
<a name="line-5874"></a>  type: DT_INT32
<a name="line-5875"></a>}
<a name="line-5876"></a>input_arg {
<a name="line-5877"></a>  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
<a name="line-5878"></a>  name: "paddings"
<a name="line-5879"></a>  type: DT_INT32
<a name="line-5880"></a>}
<a name="line-5881"></a>input_arg {
<a name="line-5882"></a>  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
<a name="line-5883"></a>  name: "filter"
<a name="line-5884"></a>  type_attr: "T"
<a name="line-5885"></a>}
<a name="line-5886"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-5887"></a>-}</span>
<a name="line-5888"></a>
<a name="line-5889"></a><a name="sub"></a><span class='hs-comment'>-- | Returns x - y element-wise.</span>
<a name="line-5890"></a><span class='hs-comment'>--</span>
<a name="line-5891"></a><span class='hs-comment'>-- *NOTE*: `Sub` supports broadcasting. More about broadcasting</span>
<a name="line-5892"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-5893"></a><span class='hs-definition'>sub</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5894"></a>                                               <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5895"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-5896"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5897"></a>                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5898"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5899"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5900"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-5901"></a><span class='hs-definition'>sub</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5902"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Sub"</span>
<a name="line-5903"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5904"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-5905"></a><span class='hs-comment'>{-
<a name="line-5906"></a>attr {
<a name="line-5907"></a>  allowed_values {
<a name="line-5908"></a>    list {
<a name="line-5909"></a>      type: DT_HALF
<a name="line-5910"></a>      type: DT_FLOAT
<a name="line-5911"></a>      type: DT_DOUBLE
<a name="line-5912"></a>      type: DT_INT32
<a name="line-5913"></a>      type: DT_INT64
<a name="line-5914"></a>      type: DT_COMPLEX64
<a name="line-5915"></a>      type: DT_COMPLEX128
<a name="line-5916"></a>    }
<a name="line-5917"></a>  }
<a name="line-5918"></a>  name: "T"
<a name="line-5919"></a>  type: "type"
<a name="line-5920"></a>}
<a name="line-5921"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5922"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-5923"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-5924"></a>-}</span>
<a name="line-5925"></a>
<a name="line-5926"></a><a name="sign"></a><span class='hs-comment'>-- | Returns an element-wise indication of the sign of a number.</span>
<a name="line-5927"></a><span class='hs-comment'>--</span>
<a name="line-5928"></a><span class='hs-comment'>-- `y = sign(x) = -1` if `x &lt; 0`; 0 if `x == 0`; 1 if `x &gt; 0`.</span>
<a name="line-5929"></a><span class='hs-comment'>-- </span>
<a name="line-5930"></a><span class='hs-comment'>-- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.</span>
<a name="line-5931"></a><span class='hs-definition'>sign</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5932"></a>                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5933"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-5934"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5935"></a>                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5936"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5937"></a><span class='hs-definition'>sign</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5938"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Sign"</span>
<a name="line-5939"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5940"></a>        <span class='hs-varid'>x</span>
<a name="line-5941"></a><span class='hs-comment'>{-
<a name="line-5942"></a>attr {
<a name="line-5943"></a>  allowed_values {
<a name="line-5944"></a>    list {
<a name="line-5945"></a>      type: DT_HALF
<a name="line-5946"></a>      type: DT_FLOAT
<a name="line-5947"></a>      type: DT_DOUBLE
<a name="line-5948"></a>      type: DT_INT32
<a name="line-5949"></a>      type: DT_INT64
<a name="line-5950"></a>      type: DT_COMPLEX64
<a name="line-5951"></a>      type: DT_COMPLEX128
<a name="line-5952"></a>    }
<a name="line-5953"></a>  }
<a name="line-5954"></a>  name: "T"
<a name="line-5955"></a>  type: "type"
<a name="line-5956"></a>}
<a name="line-5957"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5958"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5959"></a>-}</span>
<a name="line-5960"></a>
<a name="line-5961"></a><span class='hs-comment'>-- | Computes the log of the absolute value of `Gamma(x)` element-wise.</span>
<a name="line-5962"></a>
<a name="line-5963"></a><a name="lgamma"></a><span class='hs-definition'>lgamma</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5964"></a>                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-5965"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5966"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5967"></a><span class='hs-definition'>lgamma</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5968"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Lgamma"</span>
<a name="line-5969"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5970"></a>        <span class='hs-varid'>x</span>
<a name="line-5971"></a><span class='hs-comment'>{-
<a name="line-5972"></a>attr {
<a name="line-5973"></a>  allowed_values {
<a name="line-5974"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-5975"></a>  }
<a name="line-5976"></a>  name: "T"
<a name="line-5977"></a>  type: "type"
<a name="line-5978"></a>}
<a name="line-5979"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-5980"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-5981"></a>-}</span>
<a name="line-5982"></a>
<a name="line-5983"></a><a name="log"></a><span class='hs-comment'>-- | Computes natural logarithm of x element-wise.</span>
<a name="line-5984"></a><span class='hs-comment'>--</span>
<a name="line-5985"></a><span class='hs-comment'>-- I.e., \\(y = \log_e x\\).</span>
<a name="line-5986"></a><span class='hs-definition'>log</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5987"></a>                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-5988"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-5989"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-5990"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-5991"></a><span class='hs-definition'>log</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-5992"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Log"</span>
<a name="line-5993"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-5994"></a>        <span class='hs-varid'>x</span>
<a name="line-5995"></a><span class='hs-comment'>{-
<a name="line-5996"></a>attr {
<a name="line-5997"></a>  allowed_values {
<a name="line-5998"></a>    list {
<a name="line-5999"></a>      type: DT_HALF
<a name="line-6000"></a>      type: DT_FLOAT
<a name="line-6001"></a>      type: DT_DOUBLE
<a name="line-6002"></a>      type: DT_COMPLEX64
<a name="line-6003"></a>      type: DT_COMPLEX128
<a name="line-6004"></a>    }
<a name="line-6005"></a>  }
<a name="line-6006"></a>  name: "T"
<a name="line-6007"></a>  type: "type"
<a name="line-6008"></a>}
<a name="line-6009"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-6010"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-6011"></a>-}</span>
<a name="line-6012"></a>
<a name="line-6013"></a><span class='hs-comment'>-- | Computes exponential of x element-wise.  \\(y = e^x\\).</span>
<a name="line-6014"></a>
<a name="line-6015"></a><a name="exp"></a><span class='hs-definition'>exp</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6016"></a>                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6017"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6018"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6019"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6020"></a><span class='hs-definition'>exp</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6021"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Exp"</span>
<a name="line-6022"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6023"></a>        <span class='hs-varid'>x</span>
<a name="line-6024"></a><span class='hs-comment'>{-
<a name="line-6025"></a>attr {
<a name="line-6026"></a>  allowed_values {
<a name="line-6027"></a>    list {
<a name="line-6028"></a>      type: DT_HALF
<a name="line-6029"></a>      type: DT_FLOAT
<a name="line-6030"></a>      type: DT_DOUBLE
<a name="line-6031"></a>      type: DT_COMPLEX64
<a name="line-6032"></a>      type: DT_COMPLEX128
<a name="line-6033"></a>    }
<a name="line-6034"></a>  }
<a name="line-6035"></a>  name: "T"
<a name="line-6036"></a>  type: "type"
<a name="line-6037"></a>}
<a name="line-6038"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-6039"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-6040"></a>-}</span>
<a name="line-6041"></a>
<a name="line-6042"></a><a name="dilation2D"></a><span class='hs-comment'>-- | Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.</span>
<a name="line-6043"></a><span class='hs-comment'>--</span>
<a name="line-6044"></a><span class='hs-comment'>-- The `input` tensor has shape `[batch, in_height, in_width, depth]` and the</span>
<a name="line-6045"></a><span class='hs-comment'>-- `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each</span>
<a name="line-6046"></a><span class='hs-comment'>-- input channel is processed independently of the others with its own structuring</span>
<a name="line-6047"></a><span class='hs-comment'>-- function. The `output` tensor has shape</span>
<a name="line-6048"></a><span class='hs-comment'>-- `[batch, out_height, out_width, depth]`. The spatial dimensions of the output</span>
<a name="line-6049"></a><span class='hs-comment'>-- tensor depend on the `padding` algorithm. We currently only support the default</span>
<a name="line-6050"></a><span class='hs-comment'>-- "NHWC" `data_format`.</span>
<a name="line-6051"></a><span class='hs-comment'>-- </span>
<a name="line-6052"></a><span class='hs-comment'>-- In detail, the grayscale morphological 2-D dilation is the max-sum correlation</span>
<a name="line-6053"></a><span class='hs-comment'>-- (for consistency with `conv2d`, we use unmirrored filters):</span>
<a name="line-6054"></a><span class='hs-comment'>-- </span>
<a name="line-6055"></a><span class='hs-comment'>--     output[b, y, x, c] =</span>
<a name="line-6056"></a><span class='hs-comment'>--        max_{dy, dx} input[b,</span>
<a name="line-6057"></a><span class='hs-comment'>--                           strides[1] * y + rates[1] * dy,</span>
<a name="line-6058"></a><span class='hs-comment'>--                           strides[2] * x + rates[2] * dx,</span>
<a name="line-6059"></a><span class='hs-comment'>--                           c] +</span>
<a name="line-6060"></a><span class='hs-comment'>--                     filter[dy, dx, c]</span>
<a name="line-6061"></a><span class='hs-comment'>-- </span>
<a name="line-6062"></a><span class='hs-comment'>-- Max-pooling is a special case when the filter has size equal to the pooling</span>
<a name="line-6063"></a><span class='hs-comment'>-- kernel size and contains all zeros.</span>
<a name="line-6064"></a><span class='hs-comment'>-- </span>
<a name="line-6065"></a><span class='hs-comment'>-- Note on duality: The dilation of `input` by the `filter` is equal to the</span>
<a name="line-6066"></a><span class='hs-comment'>-- negation of the erosion of `-input` by the reflected `filter`.</span>
<a name="line-6067"></a><span class='hs-definition'>dilation2D</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-6068"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-6069"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-6070"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-6071"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-6072"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6073"></a>                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6074"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.</span>
<a name="line-6075"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.</span>
<a name="line-6076"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D with shape `[batch, out_height, out_width, depth]`.</span>
<a name="line-6077"></a><span class='hs-definition'>dilation2D</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6078"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Dilation2D"</span>
<a name="line-6079"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6080"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span>
<a name="line-6081"></a><span class='hs-comment'>{-
<a name="line-6082"></a>attr {
<a name="line-6083"></a>  allowed_values {
<a name="line-6084"></a>    list {
<a name="line-6085"></a>      type: DT_FLOAT
<a name="line-6086"></a>      type: DT_DOUBLE
<a name="line-6087"></a>      type: DT_INT32
<a name="line-6088"></a>      type: DT_INT64
<a name="line-6089"></a>      type: DT_UINT8
<a name="line-6090"></a>      type: DT_INT16
<a name="line-6091"></a>      type: DT_INT8
<a name="line-6092"></a>      type: DT_UINT16
<a name="line-6093"></a>      type: DT_HALF
<a name="line-6094"></a>    }
<a name="line-6095"></a>  }
<a name="line-6096"></a>  name: "T"
<a name="line-6097"></a>  type: "type"
<a name="line-6098"></a>}
<a name="line-6099"></a>attr {
<a name="line-6100"></a>  description: "The stride of the sliding window for each dimension of the input\ntensor. Must be: `[1, stride_height, stride_width, 1]`."
<a name="line-6101"></a>  has_minimum: true
<a name="line-6102"></a>  minimum: 4
<a name="line-6103"></a>  name: "strides"
<a name="line-6104"></a>  type: "list(int)"
<a name="line-6105"></a>}
<a name="line-6106"></a>attr {
<a name="line-6107"></a>  description: "The input stride for atrous morphological dilation. Must be:\n`[1, rate_height, rate_width, 1]`."
<a name="line-6108"></a>  has_minimum: true
<a name="line-6109"></a>  minimum: 4
<a name="line-6110"></a>  name: "rates"
<a name="line-6111"></a>  type: "list(int)"
<a name="line-6112"></a>}
<a name="line-6113"></a>attr {
<a name="line-6114"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-6115"></a>  description: "The type of padding algorithm to use."
<a name="line-6116"></a>  name: "padding"
<a name="line-6117"></a>  type: "string"
<a name="line-6118"></a>}
<a name="line-6119"></a>input_arg {
<a name="line-6120"></a>  description: "4-D with shape `[batch, in_height, in_width, depth]`."
<a name="line-6121"></a>  name: "input"
<a name="line-6122"></a>  type_attr: "T"
<a name="line-6123"></a>}
<a name="line-6124"></a>input_arg {
<a name="line-6125"></a>  description: "3-D with shape `[filter_height, filter_width, depth]`."
<a name="line-6126"></a>  name: "filter"
<a name="line-6127"></a>  type_attr: "T"
<a name="line-6128"></a>}
<a name="line-6129"></a>output_arg {
<a name="line-6130"></a>  description: "4-D with shape `[batch, out_height, out_width, depth]`."
<a name="line-6131"></a>  name: "output"
<a name="line-6132"></a>  type_attr: "T"
<a name="line-6133"></a>}
<a name="line-6134"></a>-}</span>
<a name="line-6135"></a>
<a name="line-6136"></a><a name="rsqrtGrad"></a><span class='hs-comment'>-- | Computes the gradient for the rsqrt of `x` wrt its input.</span>
<a name="line-6137"></a><span class='hs-comment'>--</span>
<a name="line-6138"></a><span class='hs-comment'>-- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`</span>
<a name="line-6139"></a><span class='hs-comment'>-- is the corresponding input gradient.</span>
<a name="line-6140"></a><span class='hs-definition'>rsqrtGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-6141"></a>                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6142"></a>                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6143"></a>                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6144"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6145"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6146"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-6147"></a><span class='hs-definition'>rsqrtGrad</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6148"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RsqrtGrad"</span>
<a name="line-6149"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6150"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-6151"></a><span class='hs-comment'>{-
<a name="line-6152"></a>attr {
<a name="line-6153"></a>  allowed_values {
<a name="line-6154"></a>    list {
<a name="line-6155"></a>      type: DT_HALF
<a name="line-6156"></a>      type: DT_FLOAT
<a name="line-6157"></a>      type: DT_DOUBLE
<a name="line-6158"></a>      type: DT_COMPLEX64
<a name="line-6159"></a>      type: DT_COMPLEX128
<a name="line-6160"></a>    }
<a name="line-6161"></a>  }
<a name="line-6162"></a>  name: "T"
<a name="line-6163"></a>  type: "type"
<a name="line-6164"></a>}
<a name="line-6165"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-6166"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-6167"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-6168"></a>-}</span>
<a name="line-6169"></a>
<a name="line-6170"></a><a name="rsqrt"></a><span class='hs-comment'>-- | Computes reciprocal of square root of x element-wise.</span>
<a name="line-6171"></a><span class='hs-comment'>--</span>
<a name="line-6172"></a><span class='hs-comment'>-- I.e., \\(y = 1 / \sqrt{x}\\).</span>
<a name="line-6173"></a><span class='hs-definition'>rsqrt</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6174"></a>                                              <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6175"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6176"></a>                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6177"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6178"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6179"></a><span class='hs-definition'>rsqrt</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6180"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Rsqrt"</span>
<a name="line-6181"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6182"></a>        <span class='hs-varid'>x</span>
<a name="line-6183"></a><span class='hs-comment'>{-
<a name="line-6184"></a>attr {
<a name="line-6185"></a>  allowed_values {
<a name="line-6186"></a>    list {
<a name="line-6187"></a>      type: DT_HALF
<a name="line-6188"></a>      type: DT_FLOAT
<a name="line-6189"></a>      type: DT_DOUBLE
<a name="line-6190"></a>      type: DT_COMPLEX64
<a name="line-6191"></a>      type: DT_COMPLEX128
<a name="line-6192"></a>    }
<a name="line-6193"></a>  }
<a name="line-6194"></a>  name: "T"
<a name="line-6195"></a>  type: "type"
<a name="line-6196"></a>}
<a name="line-6197"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-6198"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-6199"></a>-}</span>
<a name="line-6200"></a>
<a name="line-6201"></a><span class='hs-comment'>-- | Produces the max pool of the input tensor for quantized types.</span>
<a name="line-6202"></a>
<a name="line-6203"></a><a name="quantizedMaxPool"></a><span class='hs-definition'>quantizedMaxPool</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-6204"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-6205"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-6206"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6207"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.</span>
<a name="line-6208"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_input__: The float value that the lowest quantized input value represents.</span>
<a name="line-6209"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_input__: The float value that the highest quantized input value represents.</span>
<a name="line-6210"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-6211"></a>                    <span class='hs-comment'>-- ^ (__output__, __min_output__, __max_output__)</span>
<a name="line-6212"></a>                    <span class='hs-comment'>--</span>
<a name="line-6213"></a>                    <span class='hs-comment'>-- * __output__</span>
<a name="line-6214"></a>                    <span class='hs-comment'>--</span>
<a name="line-6215"></a>                    <span class='hs-comment'>-- * __min_output__: The float value that the lowest quantized output value represents.</span>
<a name="line-6216"></a>                    <span class='hs-comment'>--</span>
<a name="line-6217"></a>                    <span class='hs-comment'>-- * __max_output__: The float value that the highest quantized output value represents.</span>
<a name="line-6218"></a><span class='hs-definition'>quantizedMaxPool</span> <span class='hs-varid'>input</span> <span class='hs-varid'>min_input</span> <span class='hs-varid'>max_input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6219"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedMaxPool"</span>
<a name="line-6220"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6221"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>min_input</span> <span class='hs-varid'>max_input</span>
<a name="line-6222"></a><span class='hs-comment'>{-
<a name="line-6223"></a>attr {
<a name="line-6224"></a>  allowed_values {
<a name="line-6225"></a>    list {
<a name="line-6226"></a>      type: DT_QINT8
<a name="line-6227"></a>      type: DT_QUINT8
<a name="line-6228"></a>      type: DT_QINT16
<a name="line-6229"></a>      type: DT_QUINT16
<a name="line-6230"></a>      type: DT_QINT32
<a name="line-6231"></a>    }
<a name="line-6232"></a>  }
<a name="line-6233"></a>  name: "T"
<a name="line-6234"></a>  type: "type"
<a name="line-6235"></a>}
<a name="line-6236"></a>attr {
<a name="line-6237"></a>  description: "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input."
<a name="line-6238"></a>  name: "ksize"
<a name="line-6239"></a>  type: "list(int)"
<a name="line-6240"></a>}
<a name="line-6241"></a>attr {
<a name="line-6242"></a>  description: "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input."
<a name="line-6243"></a>  name: "strides"
<a name="line-6244"></a>  type: "list(int)"
<a name="line-6245"></a>}
<a name="line-6246"></a>attr {
<a name="line-6247"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-6248"></a>  description: "The type of padding algorithm to use."
<a name="line-6249"></a>  name: "padding"
<a name="line-6250"></a>  type: "string"
<a name="line-6251"></a>}
<a name="line-6252"></a>input_arg {
<a name="line-6253"></a>  description: "The 4D (batch x rows x cols x depth) Tensor to MaxReduce over."
<a name="line-6254"></a>  name: "input"
<a name="line-6255"></a>  type_attr: "T"
<a name="line-6256"></a>}
<a name="line-6257"></a>input_arg {
<a name="line-6258"></a>  description: "The float value that the lowest quantized input value represents."
<a name="line-6259"></a>  name: "min_input"
<a name="line-6260"></a>  type: DT_FLOAT
<a name="line-6261"></a>}
<a name="line-6262"></a>input_arg {
<a name="line-6263"></a>  description: "The float value that the highest quantized input value represents."
<a name="line-6264"></a>  name: "max_input"
<a name="line-6265"></a>  type: DT_FLOAT
<a name="line-6266"></a>}
<a name="line-6267"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-6268"></a>output_arg {
<a name="line-6269"></a>  description: "The float value that the lowest quantized output value represents."
<a name="line-6270"></a>  name: "min_output"
<a name="line-6271"></a>  type: DT_FLOAT
<a name="line-6272"></a>}
<a name="line-6273"></a>output_arg {
<a name="line-6274"></a>  description: "The float value that the highest quantized output value represents."
<a name="line-6275"></a>  name: "max_output"
<a name="line-6276"></a>  type: DT_FLOAT
<a name="line-6277"></a>}
<a name="line-6278"></a>-}</span>
<a name="line-6279"></a>
<a name="line-6280"></a><a name="sqrt"></a><span class='hs-comment'>-- | Computes square root of x element-wise.</span>
<a name="line-6281"></a><span class='hs-comment'>--</span>
<a name="line-6282"></a><span class='hs-comment'>-- I.e., \\(y = \sqrt{x} = x^{1/2}\\).</span>
<a name="line-6283"></a><span class='hs-definition'>sqrt</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6284"></a>                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6285"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6286"></a>                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6287"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6288"></a><span class='hs-definition'>sqrt</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6289"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Sqrt"</span>
<a name="line-6290"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6291"></a>        <span class='hs-varid'>x</span>
<a name="line-6292"></a><span class='hs-comment'>{-
<a name="line-6293"></a>attr {
<a name="line-6294"></a>  allowed_values {
<a name="line-6295"></a>    list {
<a name="line-6296"></a>      type: DT_HALF
<a name="line-6297"></a>      type: DT_FLOAT
<a name="line-6298"></a>      type: DT_DOUBLE
<a name="line-6299"></a>      type: DT_COMPLEX64
<a name="line-6300"></a>      type: DT_COMPLEX128
<a name="line-6301"></a>    }
<a name="line-6302"></a>  }
<a name="line-6303"></a>  name: "T"
<a name="line-6304"></a>  type: "type"
<a name="line-6305"></a>}
<a name="line-6306"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-6307"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-6308"></a>-}</span>
<a name="line-6309"></a>
<a name="line-6310"></a><a name="identityReader"></a><span class='hs-comment'>-- | A Reader that outputs the queued work as both the key and value.</span>
<a name="line-6311"></a><span class='hs-comment'>--</span>
<a name="line-6312"></a><span class='hs-comment'>-- To use, enqueue strings in a Queue.  ReaderRead will take the front</span>
<a name="line-6313"></a><span class='hs-comment'>-- work string and output (work, work).</span>
<a name="line-6314"></a><span class='hs-definition'>identityReader</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __reader_handle__: The handle to reference the Reader.</span>
<a name="line-6315"></a><span class='hs-definition'>identityReader</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6316"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"IdentityReader"</span><span class='hs-layout'>)</span>
<a name="line-6317"></a>        
<a name="line-6318"></a><span class='hs-comment'>{-
<a name="line-6319"></a>attr {
<a name="line-6320"></a>  default_value { s: "" }
<a name="line-6321"></a>  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
<a name="line-6322"></a>  name: "container"
<a name="line-6323"></a>  type: "string"
<a name="line-6324"></a>}
<a name="line-6325"></a>attr {
<a name="line-6326"></a>  default_value { s: "" }
<a name="line-6327"></a>  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
<a name="line-6328"></a>  name: "shared_name"
<a name="line-6329"></a>  type: "string"
<a name="line-6330"></a>}
<a name="line-6331"></a>output_arg {
<a name="line-6332"></a>  description: "The handle to reference the Reader."
<a name="line-6333"></a>  is_ref: true
<a name="line-6334"></a>  name: "reader_handle"
<a name="line-6335"></a>  type: DT_STRING
<a name="line-6336"></a>}
<a name="line-6337"></a>-}</span>
<a name="line-6338"></a>
<a name="line-6339"></a><a name="square"></a><span class='hs-comment'>-- | Computes square of x element-wise.</span>
<a name="line-6340"></a><span class='hs-comment'>--</span>
<a name="line-6341"></a><span class='hs-comment'>-- I.e., \\(y = x * x = x^2\\).</span>
<a name="line-6342"></a><span class='hs-definition'>square</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6343"></a>                                               <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6344"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-6345"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6346"></a>                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6347"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6348"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6349"></a><span class='hs-definition'>square</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6350"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Square"</span>
<a name="line-6351"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6352"></a>        <span class='hs-varid'>x</span>
<a name="line-6353"></a><span class='hs-comment'>{-
<a name="line-6354"></a>attr {
<a name="line-6355"></a>  allowed_values {
<a name="line-6356"></a>    list {
<a name="line-6357"></a>      type: DT_HALF
<a name="line-6358"></a>      type: DT_FLOAT
<a name="line-6359"></a>      type: DT_DOUBLE
<a name="line-6360"></a>      type: DT_INT32
<a name="line-6361"></a>      type: DT_INT64
<a name="line-6362"></a>      type: DT_COMPLEX64
<a name="line-6363"></a>      type: DT_COMPLEX128
<a name="line-6364"></a>    }
<a name="line-6365"></a>  }
<a name="line-6366"></a>  name: "T"
<a name="line-6367"></a>  type: "type"
<a name="line-6368"></a>}
<a name="line-6369"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-6370"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-6371"></a>-}</span>
<a name="line-6372"></a>
<a name="line-6373"></a><a name="quantizedReshape"></a><span class='hs-comment'>-- | Reshapes a quantized tensor as per the Reshape op.</span>
<a name="line-6374"></a><span class='hs-comment'>--</span>
<a name="line-6375"></a><span class='hs-comment'>-- ```</span>
<a name="line-6376"></a><span class='hs-definition'>quantizedReshape</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tshape</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-6377"></a>                                                   <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tshape</span><span class='hs-layout'>,</span>
<a name="line-6378"></a>                                                   <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-6379"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tshape</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6380"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __tensor__</span>
<a name="line-6381"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tshape</span> <span class='hs-comment'>-- ^ __shape__: Defines the shape of the output tensor.</span>
<a name="line-6382"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __input_min__: The minimum value of the input.</span>
<a name="line-6383"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __input_max__: The maximum value of the input.</span>
<a name="line-6384"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-6385"></a>                    <span class='hs-comment'>-- ^ (__output__, __output_min__, __output_max__)</span>
<a name="line-6386"></a>                    <span class='hs-comment'>--</span>
<a name="line-6387"></a>                    <span class='hs-comment'>-- * __output__</span>
<a name="line-6388"></a>                    <span class='hs-comment'>--</span>
<a name="line-6389"></a>                    <span class='hs-comment'>-- * __output_min__: This value is copied from input_min.</span>
<a name="line-6390"></a>                    <span class='hs-comment'>--</span>
<a name="line-6391"></a>                    <span class='hs-comment'>-- * __output_max__: This value is copied from input_max.</span>
<a name="line-6392"></a><span class='hs-definition'>quantizedReshape</span> <span class='hs-varid'>tensor</span> <span class='hs-varid'>shape</span> <span class='hs-varid'>input_min</span> <span class='hs-varid'>input_max</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6393"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedReshape"</span>
<a name="line-6394"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-6395"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tshape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tshape</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6396"></a>        <span class='hs-varid'>tensor</span> <span class='hs-varid'>shape</span> <span class='hs-varid'>input_min</span> <span class='hs-varid'>input_max</span>
<a name="line-6397"></a><span class='hs-comment'>{-
<a name="line-6398"></a>attr { name: "T" type: "type" }
<a name="line-6399"></a>attr {
<a name="line-6400"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-6401"></a>  default_value { type: DT_INT32 }
<a name="line-6402"></a>  name: "Tshape"
<a name="line-6403"></a>  type: "type"
<a name="line-6404"></a>}
<a name="line-6405"></a>input_arg { name: "tensor" type_attr: "T" }
<a name="line-6406"></a>input_arg {
<a name="line-6407"></a>  description: "Defines the shape of the output tensor."
<a name="line-6408"></a>  name: "shape"
<a name="line-6409"></a>  type_attr: "Tshape"
<a name="line-6410"></a>}
<a name="line-6411"></a>input_arg {
<a name="line-6412"></a>  description: "The minimum value of the input."
<a name="line-6413"></a>  name: "input_min"
<a name="line-6414"></a>  type: DT_FLOAT
<a name="line-6415"></a>}
<a name="line-6416"></a>input_arg {
<a name="line-6417"></a>  description: "The maximum value of the input."
<a name="line-6418"></a>  name: "input_max"
<a name="line-6419"></a>  type: DT_FLOAT
<a name="line-6420"></a>}
<a name="line-6421"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-6422"></a>output_arg {
<a name="line-6423"></a>  description: "This value is copied from input_min."
<a name="line-6424"></a>  name: "output_min"
<a name="line-6425"></a>  type: DT_FLOAT
<a name="line-6426"></a>}
<a name="line-6427"></a>output_arg {
<a name="line-6428"></a>  description: "This value is copied from input_max."
<a name="line-6429"></a>  name: "output_max"
<a name="line-6430"></a>  type: DT_FLOAT
<a name="line-6431"></a>}
<a name="line-6432"></a>-}</span>
<a name="line-6433"></a>
<a name="line-6434"></a><a name="reciprocalGrad"></a><span class='hs-comment'>-- | Computes the gradient for the inverse of `x` wrt its input.</span>
<a name="line-6435"></a><span class='hs-comment'>--</span>
<a name="line-6436"></a><span class='hs-comment'>-- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`</span>
<a name="line-6437"></a><span class='hs-comment'>-- is the corresponding input gradient.</span>
<a name="line-6438"></a><span class='hs-definition'>reciprocalGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-6439"></a>                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6440"></a>                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6441"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6442"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6443"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6444"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-6445"></a><span class='hs-definition'>reciprocalGrad</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6446"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReciprocalGrad"</span>
<a name="line-6447"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6448"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-6449"></a><span class='hs-comment'>{-
<a name="line-6450"></a>attr {
<a name="line-6451"></a>  allowed_values {
<a name="line-6452"></a>    list {
<a name="line-6453"></a>      type: DT_HALF
<a name="line-6454"></a>      type: DT_FLOAT
<a name="line-6455"></a>      type: DT_DOUBLE
<a name="line-6456"></a>      type: DT_COMPLEX64
<a name="line-6457"></a>      type: DT_COMPLEX128
<a name="line-6458"></a>    }
<a name="line-6459"></a>  }
<a name="line-6460"></a>  name: "T"
<a name="line-6461"></a>  type: "type"
<a name="line-6462"></a>}
<a name="line-6463"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-6464"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-6465"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-6466"></a>-}</span>
<a name="line-6467"></a>
<a name="line-6468"></a><a name="invGrad"></a><span class='hs-comment'>-- | Computes the gradient for the inverse of `x` wrt its input.</span>
<a name="line-6469"></a><span class='hs-comment'>--</span>
<a name="line-6470"></a><span class='hs-comment'>-- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`</span>
<a name="line-6471"></a><span class='hs-comment'>-- is the corresponding input gradient.</span>
<a name="line-6472"></a><span class='hs-definition'>invGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-6473"></a>                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6474"></a>                                     <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6475"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6476"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6477"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6478"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-6479"></a><span class='hs-definition'>invGrad</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6480"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"InvGrad"</span>
<a name="line-6481"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6482"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-6483"></a><span class='hs-comment'>{-
<a name="line-6484"></a>attr {
<a name="line-6485"></a>  allowed_values {
<a name="line-6486"></a>    list {
<a name="line-6487"></a>      type: DT_HALF
<a name="line-6488"></a>      type: DT_FLOAT
<a name="line-6489"></a>      type: DT_DOUBLE
<a name="line-6490"></a>      type: DT_COMPLEX64
<a name="line-6491"></a>      type: DT_COMPLEX128
<a name="line-6492"></a>    }
<a name="line-6493"></a>  }
<a name="line-6494"></a>  name: "T"
<a name="line-6495"></a>  type: "type"
<a name="line-6496"></a>}
<a name="line-6497"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-6498"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-6499"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-6500"></a>-}</span>
<a name="line-6501"></a>
<a name="line-6502"></a><a name="inv"></a><span class='hs-comment'>-- | Computes the reciprocal of x element-wise.</span>
<a name="line-6503"></a><span class='hs-comment'>--</span>
<a name="line-6504"></a><span class='hs-comment'>-- I.e., \\(y = 1 / x\\).</span>
<a name="line-6505"></a><span class='hs-definition'>inv</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6506"></a>                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6507"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-6508"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6509"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6510"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6511"></a><span class='hs-definition'>inv</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6512"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Inv"</span>
<a name="line-6513"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6514"></a>        <span class='hs-varid'>x</span>
<a name="line-6515"></a><span class='hs-comment'>{-
<a name="line-6516"></a>attr {
<a name="line-6517"></a>  allowed_values {
<a name="line-6518"></a>    list {
<a name="line-6519"></a>      type: DT_HALF
<a name="line-6520"></a>      type: DT_FLOAT
<a name="line-6521"></a>      type: DT_DOUBLE
<a name="line-6522"></a>      type: DT_INT32
<a name="line-6523"></a>      type: DT_INT64
<a name="line-6524"></a>      type: DT_COMPLEX64
<a name="line-6525"></a>      type: DT_COMPLEX128
<a name="line-6526"></a>    }
<a name="line-6527"></a>  }
<a name="line-6528"></a>  name: "T"
<a name="line-6529"></a>  type: "type"
<a name="line-6530"></a>}
<a name="line-6531"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-6532"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-6533"></a>-}</span>
<a name="line-6534"></a>
<a name="line-6535"></a><a name="tensorArrayConcatV2"></a><span class='hs-comment'>-- | Concat the elements from the TensorArray into value `value`.</span>
<a name="line-6536"></a><span class='hs-comment'>--</span>
<a name="line-6537"></a><span class='hs-comment'>-- Takes `T` elements of shapes</span>
<a name="line-6538"></a><span class='hs-comment'>-- </span>
<a name="line-6539"></a><span class='hs-comment'>--   ```</span>
<a name="line-6540"></a><span class='hs-comment'>--   (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)</span>
<a name="line-6541"></a><span class='hs-comment'>--   ```</span>
<a name="line-6542"></a><span class='hs-comment'>-- </span>
<a name="line-6543"></a><span class='hs-comment'>-- and concatenates them into a Tensor of shape:</span>
<a name="line-6544"></a><span class='hs-comment'>-- </span>
<a name="line-6545"></a><span class='hs-comment'>--   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```</span>
<a name="line-6546"></a><span class='hs-comment'>-- </span>
<a name="line-6547"></a><span class='hs-comment'>-- All elements must have the same shape (excepting the first dimension).</span>
<a name="line-6548"></a><span class='hs-definition'>tensorArrayConcatV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6549"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a TensorArray.</span>
<a name="line-6550"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-6551"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span>
<a name="line-6552"></a>                       <span class='hs-comment'>-- ^ (__value__, __lengths__)</span>
<a name="line-6553"></a>                       <span class='hs-comment'>--</span>
<a name="line-6554"></a>                       <span class='hs-comment'>-- * __value__: All of the elements in the TensorArray, concatenated along the first</span>
<a name="line-6555"></a>                       <span class='hs-comment'>-- axis.</span>
<a name="line-6556"></a>                       <span class='hs-comment'>--</span>
<a name="line-6557"></a>                       <span class='hs-comment'>-- * __lengths__: A vector of the row sizes of the original T elements in the</span>
<a name="line-6558"></a>                       <span class='hs-comment'>-- value output.  In the example above, this would be the values:</span>
<a name="line-6559"></a>                       <span class='hs-comment'>-- `(n1, n2, ..., n(T-1))`.</span>
<a name="line-6560"></a><span class='hs-definition'>tensorArrayConcatV2</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6561"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayConcatV2"</span>
<a name="line-6562"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6563"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span>
<a name="line-6564"></a><span class='hs-comment'>{-
<a name="line-6565"></a>attr {
<a name="line-6566"></a>  description: "The type of the elem that is returned."
<a name="line-6567"></a>  name: "dtype"
<a name="line-6568"></a>  type: "type"
<a name="line-6569"></a>}
<a name="line-6570"></a>attr {
<a name="line-6571"></a>  default_value { shape { unknown_rank: true } }
<a name="line-6572"></a>  description: "The expected shape of an element, if known,\nexcluding the first dimension. Used to validate the shapes of\nTensorArray elements. If this shape is not fully specified, concatenating\nzero-size TensorArrays is an error."
<a name="line-6573"></a>  name: "element_shape_except0"
<a name="line-6574"></a>  type: "shape"
<a name="line-6575"></a>}
<a name="line-6576"></a>input_arg {
<a name="line-6577"></a>  description: "The handle to a TensorArray."
<a name="line-6578"></a>  name: "handle"
<a name="line-6579"></a>  type: DT_STRING
<a name="line-6580"></a>}
<a name="line-6581"></a>input_arg {
<a name="line-6582"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-6583"></a>  name: "flow_in"
<a name="line-6584"></a>  type: DT_FLOAT
<a name="line-6585"></a>}
<a name="line-6586"></a>output_arg {
<a name="line-6587"></a>  description: "All of the elements in the TensorArray, concatenated along the first\naxis."
<a name="line-6588"></a>  name: "value"
<a name="line-6589"></a>  type_attr: "dtype"
<a name="line-6590"></a>}
<a name="line-6591"></a>output_arg {
<a name="line-6592"></a>  description: "A vector of the row sizes of the original T elements in the\nvalue output.  In the example above, this would be the values:\n`(n1, n2, ..., n(T-1))`."
<a name="line-6593"></a>  name: "lengths"
<a name="line-6594"></a>  type: DT_INT64
<a name="line-6595"></a>}
<a name="line-6596"></a>-}</span>
<a name="line-6597"></a>
<a name="line-6598"></a><a name="complexAbs"></a><span class='hs-comment'>-- | Computes the complex absolute value of a tensor.</span>
<a name="line-6599"></a><span class='hs-comment'>--</span>
<a name="line-6600"></a><span class='hs-comment'>-- Given a tensor `x` of complex numbers, this operation returns a tensor of type</span>
<a name="line-6601"></a><span class='hs-comment'>-- `float` or `double` that is the absolute value of each element in `x`. All</span>
<a name="line-6602"></a><span class='hs-comment'>-- elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute</span>
<a name="line-6603"></a><span class='hs-comment'>-- value is computed as \\( \sqrt{a^2 + b^2}\\).</span>
<a name="line-6604"></a><span class='hs-comment'>-- </span>
<a name="line-6605"></a><span class='hs-comment'>-- For example:</span>
<a name="line-6606"></a><span class='hs-comment'>-- </span>
<a name="line-6607"></a><span class='hs-comment'>-- ```</span>
<a name="line-6608"></a><span class='hs-comment'>-- # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]</span>
<a name="line-6609"></a><span class='hs-comment'>-- tf.complex_abs(x) ==&gt; [5.25594902, 6.60492229]</span>
<a name="line-6610"></a><span class='hs-comment'>-- ```</span>
<a name="line-6611"></a><span class='hs-definition'>complexAbs</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tout</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-6612"></a>                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6613"></a>                                          <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-6614"></a>                                  <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tout</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6615"></a>                                                           <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6616"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6617"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tout</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6618"></a><span class='hs-definition'>complexAbs</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6619"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ComplexAbs"</span>
<a name="line-6620"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-6621"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tout"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6622"></a>        <span class='hs-varid'>x</span>
<a name="line-6623"></a><span class='hs-comment'>{-
<a name="line-6624"></a>attr {
<a name="line-6625"></a>  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
<a name="line-6626"></a>  default_value { type: DT_COMPLEX64 }
<a name="line-6627"></a>  name: "T"
<a name="line-6628"></a>  type: "type"
<a name="line-6629"></a>}
<a name="line-6630"></a>attr {
<a name="line-6631"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-6632"></a>  default_value { type: DT_FLOAT }
<a name="line-6633"></a>  name: "Tout"
<a name="line-6634"></a>  type: "type"
<a name="line-6635"></a>}
<a name="line-6636"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-6637"></a>output_arg { name: "y" type_attr: "Tout" }
<a name="line-6638"></a>-}</span>
<a name="line-6639"></a>
<a name="line-6640"></a><span class='hs-comment'>-- | Cast x of type SrcT to y of DstT.</span>
<a name="line-6641"></a><span class='hs-comment'>--</span>
<a name="line-6642"></a><span class='hs-comment'>-- _HostCast requires its input and produces its output in host memory.</span>
<a name="line-6643"></a><span class='hs-sel'>_HostCast</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>srcT</span> <span class='hs-varid'>dstT</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>srcT</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>dstT</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6644"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>srcT</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6645"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dstT</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6646"></a><span class='hs-sel'>_HostCast</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6647"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"_HostCast"</span>
<a name="line-6648"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"SrcT"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>srcT</span><span class='hs-layout'>)</span>
<a name="line-6649"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"DstT"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dstT</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6650"></a>        <span class='hs-varid'>x</span>
<a name="line-6651"></a><span class='hs-comment'>{-
<a name="line-6652"></a>attr { name: "SrcT" type: "type" }
<a name="line-6653"></a>attr { name: "DstT" type: "type" }
<a name="line-6654"></a>input_arg { name: "x" type_attr: "SrcT" }
<a name="line-6655"></a>output_arg { name: "y" type_attr: "DstT" }
<a name="line-6656"></a>-}</span>
<a name="line-6657"></a>
<a name="line-6658"></a><span class='hs-comment'>-- | Resize `images` to `size` using nearest neighbor interpolation.</span>
<a name="line-6659"></a>
<a name="line-6660"></a><a name="resizeNearestNeighbor"></a><span class='hs-definition'>resizeNearestNeighbor</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-6661"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-6662"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-6663"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-6664"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-6665"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-6666"></a>                                                                 <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6667"></a>                                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6668"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __images__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-6669"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The</span>
<a name="line-6670"></a>                                                     <span class='hs-comment'>-- new size for the images.</span>
<a name="line-6671"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __resized_images__: 4-D with shape</span>
<a name="line-6672"></a>                         <span class='hs-comment'>-- `[batch, new_height, new_width, channels]`.</span>
<a name="line-6673"></a><span class='hs-definition'>resizeNearestNeighbor</span> <span class='hs-varid'>images</span> <span class='hs-varid'>size</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6674"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ResizeNearestNeighbor"</span>
<a name="line-6675"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6676"></a>        <span class='hs-varid'>images</span> <span class='hs-varid'>size</span>
<a name="line-6677"></a><span class='hs-comment'>{-
<a name="line-6678"></a>attr {
<a name="line-6679"></a>  allowed_values {
<a name="line-6680"></a>    list {
<a name="line-6681"></a>      type: DT_UINT8
<a name="line-6682"></a>      type: DT_INT8
<a name="line-6683"></a>      type: DT_INT16
<a name="line-6684"></a>      type: DT_INT32
<a name="line-6685"></a>      type: DT_INT64
<a name="line-6686"></a>      type: DT_HALF
<a name="line-6687"></a>      type: DT_FLOAT
<a name="line-6688"></a>      type: DT_DOUBLE
<a name="line-6689"></a>    }
<a name="line-6690"></a>  }
<a name="line-6691"></a>  name: "T"
<a name="line-6692"></a>  type: "type"
<a name="line-6693"></a>}
<a name="line-6694"></a>attr {
<a name="line-6695"></a>  default_value { b: false }
<a name="line-6696"></a>  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
<a name="line-6697"></a>  name: "align_corners"
<a name="line-6698"></a>  type: "bool"
<a name="line-6699"></a>}
<a name="line-6700"></a>input_arg {
<a name="line-6701"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-6702"></a>  name: "images"
<a name="line-6703"></a>  type_attr: "T"
<a name="line-6704"></a>}
<a name="line-6705"></a>input_arg {
<a name="line-6706"></a>  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
<a name="line-6707"></a>  name: "size"
<a name="line-6708"></a>  type: DT_INT32
<a name="line-6709"></a>}
<a name="line-6710"></a>output_arg {
<a name="line-6711"></a>  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
<a name="line-6712"></a>  name: "resized_images"
<a name="line-6713"></a>  type_attr: "T"
<a name="line-6714"></a>}
<a name="line-6715"></a>-}</span>
<a name="line-6716"></a>
<a name="line-6717"></a><span class='hs-comment'>-- | Deprecated. Disallowed in GraphDef version &gt;= 2.</span>
<a name="line-6718"></a>
<a name="line-6719"></a><a name="adjustContrast"></a><span class='hs-definition'>adjustContrast</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-6720"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-6721"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-6722"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-6723"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-6724"></a>                                                                <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6725"></a>                                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6726"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __images__</span>
<a name="line-6727"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __contrast_factor__</span>
<a name="line-6728"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_value__</span>
<a name="line-6729"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_value__</span>
<a name="line-6730"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-6731"></a><span class='hs-definition'>adjustContrast</span> <span class='hs-varid'>images</span> <span class='hs-varid'>contrast_factor</span> <span class='hs-varid'>min_value</span> <span class='hs-varid'>max_value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6732"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AdjustContrast"</span>
<a name="line-6733"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6734"></a>        <span class='hs-varid'>images</span> <span class='hs-varid'>contrast_factor</span> <span class='hs-varid'>min_value</span> <span class='hs-varid'>max_value</span>
<a name="line-6735"></a><span class='hs-comment'>{-
<a name="line-6736"></a>attr {
<a name="line-6737"></a>  allowed_values {
<a name="line-6738"></a>    list {
<a name="line-6739"></a>      type: DT_UINT8
<a name="line-6740"></a>      type: DT_INT8
<a name="line-6741"></a>      type: DT_INT16
<a name="line-6742"></a>      type: DT_INT32
<a name="line-6743"></a>      type: DT_INT64
<a name="line-6744"></a>      type: DT_FLOAT
<a name="line-6745"></a>      type: DT_DOUBLE
<a name="line-6746"></a>    }
<a name="line-6747"></a>  }
<a name="line-6748"></a>  name: "T"
<a name="line-6749"></a>  type: "type"
<a name="line-6750"></a>}
<a name="line-6751"></a>input_arg { name: "images" type_attr: "T" }
<a name="line-6752"></a>input_arg { name: "contrast_factor" type: DT_FLOAT }
<a name="line-6753"></a>input_arg { name: "min_value" type: DT_FLOAT }
<a name="line-6754"></a>input_arg { name: "max_value" type: DT_FLOAT }
<a name="line-6755"></a>output_arg { name: "output" type: DT_FLOAT }
<a name="line-6756"></a>-}</span>
<a name="line-6757"></a>
<a name="line-6758"></a><span class='hs-comment'>-- | </span>
<a name="line-6759"></a>
<a name="line-6760"></a><a name="batchMatrixDiagPart"></a><span class='hs-definition'>batchMatrixDiagPart</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6761"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-6762"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __diagonal__</span>
<a name="line-6763"></a><span class='hs-definition'>batchMatrixDiagPart</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6764"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchMatrixDiagPart"</span>
<a name="line-6765"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6766"></a>        <span class='hs-varid'>input</span>
<a name="line-6767"></a><span class='hs-comment'>{-
<a name="line-6768"></a>attr { name: "T" type: "type" }
<a name="line-6769"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-6770"></a>output_arg { name: "diagonal" type_attr: "T" }
<a name="line-6771"></a>-}</span>
<a name="line-6772"></a>
<a name="line-6773"></a><span class='hs-comment'>-- | </span>
<a name="line-6774"></a>
<a name="line-6775"></a><a name="batchMatrixSetDiag"></a><span class='hs-definition'>batchMatrixSetDiag</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6776"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-6777"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __diagonal__</span>
<a name="line-6778"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-6779"></a><span class='hs-definition'>batchMatrixSetDiag</span> <span class='hs-varid'>input</span> <span class='hs-varid'>diagonal</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6780"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchMatrixSetDiag"</span>
<a name="line-6781"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6782"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>diagonal</span>
<a name="line-6783"></a><span class='hs-comment'>{-
<a name="line-6784"></a>attr { name: "T" type: "type" }
<a name="line-6785"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-6786"></a>input_arg { name: "diagonal" type_attr: "T" }
<a name="line-6787"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-6788"></a>-}</span>
<a name="line-6789"></a>
<a name="line-6790"></a><span class='hs-comment'>-- | </span>
<a name="line-6791"></a>
<a name="line-6792"></a><a name="batchMatrixDiag"></a><span class='hs-definition'>batchMatrixDiag</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __diagonal__</span>
<a name="line-6793"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-6794"></a><span class='hs-definition'>batchMatrixDiag</span> <span class='hs-varid'>diagonal</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6795"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchMatrixDiag"</span>
<a name="line-6796"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6797"></a>        <span class='hs-varid'>diagonal</span>
<a name="line-6798"></a><span class='hs-comment'>{-
<a name="line-6799"></a>attr { name: "T" type: "type" }
<a name="line-6800"></a>input_arg { name: "diagonal" type_attr: "T" }
<a name="line-6801"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-6802"></a>-}</span>
<a name="line-6803"></a>
<a name="line-6804"></a><span class='hs-comment'>-- | Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.</span>
<a name="line-6805"></a>
<a name="line-6806"></a><a name="fakeQuantWithMinMaxVarsPerChannelGradient"></a><span class='hs-definition'>fakeQuantWithMinMaxVarsPerChannelGradient</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation,</span>
<a name="line-6807"></a>                                                             <span class='hs-comment'>-- shape one of: `[d]`, `[b, d]`,  `[b, h, w, d]`.</span>
<a name="line-6808"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape</span>
<a name="line-6809"></a>                                                                <span class='hs-comment'>--   same as `gradients`.</span>
<a name="line-6810"></a>                                                                <span class='hs-comment'>-- min, max: Quantization interval, floats of shape `[d]`.</span>
<a name="line-6811"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min__</span>
<a name="line-6812"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max__</span>
<a name="line-6813"></a>                                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-6814"></a>                                                 <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-6815"></a>                                                 <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-6816"></a>                                             <span class='hs-comment'>-- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__)</span>
<a name="line-6817"></a>                                             <span class='hs-comment'>--</span>
<a name="line-6818"></a>                                             <span class='hs-comment'>-- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs, shape same as</span>
<a name="line-6819"></a>                                             <span class='hs-comment'>-- `inputs`:</span>
<a name="line-6820"></a>                                             <span class='hs-comment'>--   `gradients * (inputs &gt;= min &amp;&amp; inputs &lt;= max)`.</span>
<a name="line-6821"></a>                                             <span class='hs-comment'>--</span>
<a name="line-6822"></a>                                             <span class='hs-comment'>-- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter, shape `[d]`:</span>
<a name="line-6823"></a>                                             <span class='hs-comment'>-- `sum_per_d(gradients * (inputs &lt; min))`.</span>
<a name="line-6824"></a>                                             <span class='hs-comment'>--</span>
<a name="line-6825"></a>                                             <span class='hs-comment'>-- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter, shape `[d]`:</span>
<a name="line-6826"></a>                                             <span class='hs-comment'>-- `sum_per_d(gradients * (inputs &gt; max))`.</span>
<a name="line-6827"></a><span class='hs-definition'>fakeQuantWithMinMaxVarsPerChannelGradient</span> <span class='hs-varid'>gradients</span> <span class='hs-varid'>inputs</span> <span class='hs-varid'>min</span>
<a name="line-6828"></a>                                          <span class='hs-varid'>max</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6829"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FakeQuantWithMinMaxVarsPerChannelGradient"</span><span class='hs-layout'>)</span>
<a name="line-6830"></a>        <span class='hs-varid'>gradients</span> <span class='hs-varid'>inputs</span> <span class='hs-varid'>min</span> <span class='hs-varid'>max</span>
<a name="line-6831"></a><span class='hs-comment'>{-
<a name="line-6832"></a>input_arg {
<a name="line-6833"></a>  description: "Backpropagated gradients above the FakeQuantWithMinMaxVars operation,\nshape one of: `[d]`, `[b, d]`,  `[b, h, w, d]`."
<a name="line-6834"></a>  name: "gradients"
<a name="line-6835"></a>  type: DT_FLOAT
<a name="line-6836"></a>}
<a name="line-6837"></a>input_arg {
<a name="line-6838"></a>  description: "Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape\n  same as `gradients`.\nmin, max: Quantization interval, floats of shape `[d]`."
<a name="line-6839"></a>  name: "inputs"
<a name="line-6840"></a>  type: DT_FLOAT
<a name="line-6841"></a>}
<a name="line-6842"></a>input_arg { name: "min" type: DT_FLOAT }
<a name="line-6843"></a>input_arg { name: "max" type: DT_FLOAT }
<a name="line-6844"></a>output_arg {
<a name="line-6845"></a>  description: "Backpropagated gradients w.r.t. inputs, shape same as\n`inputs`:\n  `gradients * (inputs &gt;= min &amp;&amp; inputs &lt;= max)`."
<a name="line-6846"></a>  name: "backprops_wrt_input"
<a name="line-6847"></a>  type: DT_FLOAT
<a name="line-6848"></a>}
<a name="line-6849"></a>output_arg {
<a name="line-6850"></a>  description: "Backpropagated gradients w.r.t. min parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs &lt; min))`."
<a name="line-6851"></a>  name: "backprop_wrt_min"
<a name="line-6852"></a>  type: DT_FLOAT
<a name="line-6853"></a>}
<a name="line-6854"></a>output_arg {
<a name="line-6855"></a>  description: "Backpropagated gradients w.r.t. max parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs &gt; max))`."
<a name="line-6856"></a>  name: "backprop_wrt_max"
<a name="line-6857"></a>  type: DT_FLOAT
<a name="line-6858"></a>}
<a name="line-6859"></a>-}</span>
<a name="line-6860"></a>
<a name="line-6861"></a><a name="sparseSegmentSqrtNGrad"></a><span class='hs-comment'>-- | Computes gradients for SparseSegmentSqrtN.</span>
<a name="line-6862"></a><span class='hs-comment'>--</span>
<a name="line-6863"></a><span class='hs-comment'>-- Returns tensor "output" with same shape as grad, except for dimension 0 whose</span>
<a name="line-6864"></a><span class='hs-comment'>-- value is output_dim0.</span>
<a name="line-6865"></a><span class='hs-definition'>sparseSegmentSqrtNGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-6866"></a>                                                       <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-6867"></a>                                                       <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-6868"></a>                                                       <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-6869"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6870"></a>                          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: gradient propagated to the SparseSegmentSqrtN op.</span>
<a name="line-6871"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __indices__: indices passed to the corresponding SparseSegmentSqrtN op.</span>
<a name="line-6872"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentSqrtN op.</span>
<a name="line-6873"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentSqrtN op.</span>
<a name="line-6874"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-6875"></a><span class='hs-definition'>sparseSegmentSqrtNGrad</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>segment_ids</span> <span class='hs-varid'>output_dim0</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6876"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseSegmentSqrtNGrad"</span>
<a name="line-6877"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-6878"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6879"></a>        <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>segment_ids</span> <span class='hs-varid'>output_dim0</span>
<a name="line-6880"></a><span class='hs-comment'>{-
<a name="line-6881"></a>attr {
<a name="line-6882"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-6883"></a>  name: "T"
<a name="line-6884"></a>  type: "type"
<a name="line-6885"></a>}
<a name="line-6886"></a>attr {
<a name="line-6887"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-6888"></a>  default_value { type: DT_INT32 }
<a name="line-6889"></a>  name: "Tidx"
<a name="line-6890"></a>  type: "type"
<a name="line-6891"></a>}
<a name="line-6892"></a>input_arg {
<a name="line-6893"></a>  description: "gradient propagated to the SparseSegmentSqrtN op."
<a name="line-6894"></a>  name: "grad"
<a name="line-6895"></a>  type_attr: "T"
<a name="line-6896"></a>}
<a name="line-6897"></a>input_arg {
<a name="line-6898"></a>  description: "indices passed to the corresponding SparseSegmentSqrtN op."
<a name="line-6899"></a>  name: "indices"
<a name="line-6900"></a>  type_attr: "Tidx"
<a name="line-6901"></a>}
<a name="line-6902"></a>input_arg {
<a name="line-6903"></a>  description: "segment_ids passed to the corresponding SparseSegmentSqrtN op."
<a name="line-6904"></a>  name: "segment_ids"
<a name="line-6905"></a>  type: DT_INT32
<a name="line-6906"></a>}
<a name="line-6907"></a>input_arg {
<a name="line-6908"></a>  description: "dimension 0 of \"data\" passed to SparseSegmentSqrtN op."
<a name="line-6909"></a>  name: "output_dim0"
<a name="line-6910"></a>  type: DT_INT32
<a name="line-6911"></a>}
<a name="line-6912"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-6913"></a>-}</span>
<a name="line-6914"></a>
<a name="line-6915"></a><a name="fakeQuantWithMinMaxVarsPerChannel"></a><span class='hs-comment'>-- | Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,</span>
<a name="line-6916"></a><span class='hs-comment'>--</span>
<a name="line-6917"></a><span class='hs-comment'>-- `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`</span>
<a name="line-6918"></a><span class='hs-comment'>-- to 'outputs' tensor of same shape as `inputs`.</span>
<a name="line-6919"></a><span class='hs-comment'>-- </span>
<a name="line-6920"></a><span class='hs-comment'>-- [min; max] is the clamping range for the 'inputs' data in the corresponding</span>
<a name="line-6921"></a><span class='hs-comment'>-- depth channel.  Op divides this range into 255 steps (total of 256 values), then</span>
<a name="line-6922"></a><span class='hs-comment'>-- replaces each 'inputs' value with the closest of the quantized step values.</span>
<a name="line-6923"></a><span class='hs-comment'>-- </span>
<a name="line-6924"></a><span class='hs-comment'>-- This operation has a gradient and thus allows for training `min` and `max` values.</span>
<a name="line-6925"></a><span class='hs-definition'>fakeQuantWithMinMaxVarsPerChannel</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __inputs__</span>
<a name="line-6926"></a>                                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min__</span>
<a name="line-6927"></a>                                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max__</span>
<a name="line-6928"></a>                                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __outputs__</span>
<a name="line-6929"></a><span class='hs-definition'>fakeQuantWithMinMaxVarsPerChannel</span> <span class='hs-varid'>inputs</span> <span class='hs-varid'>min</span> <span class='hs-varid'>max</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6930"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FakeQuantWithMinMaxVarsPerChannel"</span><span class='hs-layout'>)</span>
<a name="line-6931"></a>        <span class='hs-varid'>inputs</span> <span class='hs-varid'>min</span> <span class='hs-varid'>max</span>
<a name="line-6932"></a><span class='hs-comment'>{-
<a name="line-6933"></a>input_arg { name: "inputs" type: DT_FLOAT }
<a name="line-6934"></a>input_arg { name: "min" type: DT_FLOAT }
<a name="line-6935"></a>input_arg { name: "max" type: DT_FLOAT }
<a name="line-6936"></a>output_arg { name: "outputs" type: DT_FLOAT }
<a name="line-6937"></a>-}</span>
<a name="line-6938"></a>
<a name="line-6939"></a><a name="scalarSummary"></a><span class='hs-comment'>-- | Outputs a `Summary` protocol buffer with scalar values.</span>
<a name="line-6940"></a><span class='hs-comment'>--</span>
<a name="line-6941"></a><span class='hs-comment'>-- The input `tags` and `values` must have the same shape.  The generated summary</span>
<a name="line-6942"></a><span class='hs-comment'>-- has a summary value for each tag-value pair in `tags` and `values`.</span>
<a name="line-6943"></a><span class='hs-definition'>scalarSummary</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-6944"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-6945"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-6946"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-6947"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-6948"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-6949"></a>                                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-6950"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __tags__: Tags for the summary.</span>
<a name="line-6951"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __values__: Same shape as `tags.  Values for the summary.</span>
<a name="line-6952"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __summary__: Scalar.  Serialized `Summary` protocol buffer.</span>
<a name="line-6953"></a><span class='hs-definition'>scalarSummary</span> <span class='hs-varid'>tags</span> <span class='hs-varid'>values</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-6954"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ScalarSummary"</span>
<a name="line-6955"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-6956"></a>        <span class='hs-varid'>tags</span> <span class='hs-varid'>values</span>
<a name="line-6957"></a><span class='hs-comment'>{-
<a name="line-6958"></a>attr {
<a name="line-6959"></a>  allowed_values {
<a name="line-6960"></a>    list {
<a name="line-6961"></a>      type: DT_FLOAT
<a name="line-6962"></a>      type: DT_DOUBLE
<a name="line-6963"></a>      type: DT_INT32
<a name="line-6964"></a>      type: DT_INT64
<a name="line-6965"></a>      type: DT_UINT8
<a name="line-6966"></a>      type: DT_INT16
<a name="line-6967"></a>      type: DT_INT8
<a name="line-6968"></a>      type: DT_UINT16
<a name="line-6969"></a>      type: DT_HALF
<a name="line-6970"></a>    }
<a name="line-6971"></a>  }
<a name="line-6972"></a>  name: "T"
<a name="line-6973"></a>  type: "type"
<a name="line-6974"></a>}
<a name="line-6975"></a>input_arg {
<a name="line-6976"></a>  description: "Tags for the summary." name: "tags" type: DT_STRING
<a name="line-6977"></a>}
<a name="line-6978"></a>input_arg {
<a name="line-6979"></a>  description: "Same shape as `tags.  Values for the summary."
<a name="line-6980"></a>  name: "values"
<a name="line-6981"></a>  type_attr: "T"
<a name="line-6982"></a>}
<a name="line-6983"></a>output_arg {
<a name="line-6984"></a>  description: "Scalar.  Serialized `Summary` protocol buffer."
<a name="line-6985"></a>  name: "summary"
<a name="line-6986"></a>  type: DT_STRING
<a name="line-6987"></a>}
<a name="line-6988"></a>-}</span>
<a name="line-6989"></a>
<a name="line-6990"></a><a name="neg"></a><span class='hs-comment'>-- | Computes numerical negative value element-wise.</span>
<a name="line-6991"></a><span class='hs-comment'>--</span>
<a name="line-6992"></a><span class='hs-comment'>-- I.e., \\(y = -x\\).</span>
<a name="line-6993"></a><span class='hs-definition'>neg</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6994"></a>                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-6995"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-6996"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-6997"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-6998"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-6999"></a><span class='hs-definition'>neg</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7000"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Neg"</span>
<a name="line-7001"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-7002"></a>        <span class='hs-varid'>x</span>
<a name="line-7003"></a><span class='hs-comment'>{-
<a name="line-7004"></a>attr {
<a name="line-7005"></a>  allowed_values {
<a name="line-7006"></a>    list {
<a name="line-7007"></a>      type: DT_HALF
<a name="line-7008"></a>      type: DT_FLOAT
<a name="line-7009"></a>      type: DT_DOUBLE
<a name="line-7010"></a>      type: DT_INT32
<a name="line-7011"></a>      type: DT_INT64
<a name="line-7012"></a>      type: DT_COMPLEX64
<a name="line-7013"></a>      type: DT_COMPLEX128
<a name="line-7014"></a>    }
<a name="line-7015"></a>  }
<a name="line-7016"></a>  name: "T"
<a name="line-7017"></a>  type: "type"
<a name="line-7018"></a>}
<a name="line-7019"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-7020"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-7021"></a>-}</span>
<a name="line-7022"></a>
<a name="line-7023"></a><span class='hs-comment'>-- | Compute gradients for a FakeQuantWithMinMaxArgs operation.</span>
<a name="line-7024"></a>
<a name="line-7025"></a><a name="fakeQuantWithMinMaxArgsGradient"></a><span class='hs-definition'>fakeQuantWithMinMaxArgsGradient</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.</span>
<a name="line-7026"></a>                                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.</span>
<a name="line-7027"></a>                                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __backprops__: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:</span>
<a name="line-7028"></a>                                   <span class='hs-comment'>-- `gradients * (inputs &gt;= min &amp;&amp; inputs &lt;= max)`.</span>
<a name="line-7029"></a><span class='hs-definition'>fakeQuantWithMinMaxArgsGradient</span> <span class='hs-varid'>gradients</span> <span class='hs-varid'>inputs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7030"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FakeQuantWithMinMaxArgsGradient"</span><span class='hs-layout'>)</span>
<a name="line-7031"></a>        <span class='hs-varid'>gradients</span> <span class='hs-varid'>inputs</span>
<a name="line-7032"></a><span class='hs-comment'>{-
<a name="line-7033"></a>attr { default_value { f: -6.0 } name: "min" type: "float" }
<a name="line-7034"></a>attr { default_value { f: 6.0 } name: "max" type: "float" }
<a name="line-7035"></a>input_arg {
<a name="line-7036"></a>  description: "Backpropagated gradients above the FakeQuantWithMinMaxArgs operation."
<a name="line-7037"></a>  name: "gradients"
<a name="line-7038"></a>  type: DT_FLOAT
<a name="line-7039"></a>}
<a name="line-7040"></a>input_arg {
<a name="line-7041"></a>  description: "Values passed as inputs to the FakeQuantWithMinMaxArgs operation."
<a name="line-7042"></a>  name: "inputs"
<a name="line-7043"></a>  type: DT_FLOAT
<a name="line-7044"></a>}
<a name="line-7045"></a>output_arg {
<a name="line-7046"></a>  description: "Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:\n`gradients * (inputs &gt;= min &amp;&amp; inputs &lt;= max)`."
<a name="line-7047"></a>  name: "backprops"
<a name="line-7048"></a>  type: DT_FLOAT
<a name="line-7049"></a>}
<a name="line-7050"></a>-}</span>
<a name="line-7051"></a>
<a name="line-7052"></a><a name="debugNanCount"></a><span class='hs-comment'>-- | Debug NaN Value Counter Op</span>
<a name="line-7053"></a><span class='hs-comment'>--</span>
<a name="line-7054"></a><span class='hs-comment'>-- Counts number of NaNs in the input tensor, for debugging.</span>
<a name="line-7055"></a><span class='hs-definition'>debugNanCount</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7056"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Input tensor, non-Reference type.</span>
<a name="line-7057"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __output__: An integer output tensor that is the number of NaNs in the input.</span>
<a name="line-7058"></a><span class='hs-definition'>debugNanCount</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7059"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DebugNanCount"</span>
<a name="line-7060"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-7061"></a>        <span class='hs-varid'>input</span>
<a name="line-7062"></a><span class='hs-comment'>{-
<a name="line-7063"></a>attr { name: "T" type: "type" }
<a name="line-7064"></a>attr {
<a name="line-7065"></a>  default_value { s: "" }
<a name="line-7066"></a>  description: "Name of the input tensor."
<a name="line-7067"></a>  name: "tensor_name"
<a name="line-7068"></a>  type: "string"
<a name="line-7069"></a>}
<a name="line-7070"></a>attr {
<a name="line-7071"></a>  default_value { list { } }
<a name="line-7072"></a>  description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc:://localhost:11011"
<a name="line-7073"></a>  name: "debug_urls"
<a name="line-7074"></a>  type: "list(string)"
<a name="line-7075"></a>}
<a name="line-7076"></a>input_arg {
<a name="line-7077"></a>  description: "Input tensor, non-Reference type."
<a name="line-7078"></a>  name: "input"
<a name="line-7079"></a>  type_attr: "T"
<a name="line-7080"></a>}
<a name="line-7081"></a>output_arg {
<a name="line-7082"></a>  description: "An integer output tensor that is the number of NaNs in the input."
<a name="line-7083"></a>  name: "output"
<a name="line-7084"></a>  type: DT_INT64
<a name="line-7085"></a>}
<a name="line-7086"></a>-}</span>
<a name="line-7087"></a>
<a name="line-7088"></a><a name="debugIdentity"></a><span class='hs-comment'>-- | Debug Identity Op.</span>
<a name="line-7089"></a><span class='hs-comment'>--</span>
<a name="line-7090"></a><span class='hs-comment'>-- Provides an identity mapping of the non-Ref type input tensor for debugging.</span>
<a name="line-7091"></a><span class='hs-definition'>debugIdentity</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7092"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Input tensor, non-Reference type.</span>
<a name="line-7093"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Output tensor that equals the input tensor.</span>
<a name="line-7094"></a><span class='hs-definition'>debugIdentity</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7095"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DebugIdentity"</span>
<a name="line-7096"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-7097"></a>        <span class='hs-varid'>input</span>
<a name="line-7098"></a><span class='hs-comment'>{-
<a name="line-7099"></a>attr { name: "T" type: "type" }
<a name="line-7100"></a>attr {
<a name="line-7101"></a>  default_value { s: "" }
<a name="line-7102"></a>  description: "Name of the input tensor."
<a name="line-7103"></a>  name: "tensor_name"
<a name="line-7104"></a>  type: "string"
<a name="line-7105"></a>}
<a name="line-7106"></a>attr {
<a name="line-7107"></a>  default_value { list { } }
<a name="line-7108"></a>  description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc:://localhost:11011"
<a name="line-7109"></a>  name: "debug_urls"
<a name="line-7110"></a>  type: "list(string)"
<a name="line-7111"></a>}
<a name="line-7112"></a>input_arg {
<a name="line-7113"></a>  description: "Input tensor, non-Reference type."
<a name="line-7114"></a>  name: "input"
<a name="line-7115"></a>  type_attr: "T"
<a name="line-7116"></a>}
<a name="line-7117"></a>output_arg {
<a name="line-7118"></a>  description: "Output tensor that equals the input tensor."
<a name="line-7119"></a>  name: "output"
<a name="line-7120"></a>  type_attr: "T"
<a name="line-7121"></a>}
<a name="line-7122"></a>-}</span>
<a name="line-7123"></a>
<a name="line-7124"></a><a name="bitcast"></a><span class='hs-comment'>-- | Bitcasts a tensor from one type to another without copying data.</span>
<a name="line-7125"></a><span class='hs-comment'>--</span>
<a name="line-7126"></a><span class='hs-comment'>-- Given a tensor `input`, this operation returns a tensor that has the same buffer</span>
<a name="line-7127"></a><span class='hs-comment'>-- data as `input` with datatype `type`.</span>
<a name="line-7128"></a><span class='hs-comment'>-- </span>
<a name="line-7129"></a><span class='hs-comment'>-- If the input datatype `T` is larger than the output datatype `type` then the</span>
<a name="line-7130"></a><span class='hs-comment'>-- shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].</span>
<a name="line-7131"></a><span class='hs-comment'>-- </span>
<a name="line-7132"></a><span class='hs-comment'>-- If `T` is smaller than `type`, the operator requires that the rightmost</span>
<a name="line-7133"></a><span class='hs-comment'>-- dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from</span>
<a name="line-7134"></a><span class='hs-comment'>-- [..., sizeof(`type`)/sizeof(`T`)] to [...].</span>
<a name="line-7135"></a><span class='hs-comment'>-- </span>
<a name="line-7136"></a><span class='hs-comment'>-- *NOTE*: Bitcast is implemented as a low-level cast, so machines with different</span>
<a name="line-7137"></a><span class='hs-comment'>-- endian orderings will give different results.</span>
<a name="line-7138"></a><span class='hs-definition'>bitcast</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varid'>type'</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-7139"></a>                                <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-7140"></a>                                        <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-7141"></a>                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-7142"></a>                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-7143"></a>                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-7144"></a>                                        <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>type'</span><span class='hs-layout'>,</span>
<a name="line-7145"></a>                                <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-7146"></a>                                        <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-7147"></a>                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-7148"></a>                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-7149"></a>                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-7150"></a>                                        <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>type'</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7151"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-7152"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>type'</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-7153"></a><span class='hs-definition'>bitcast</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7154"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Bitcast"</span>
<a name="line-7155"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-7156"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>type'</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-7157"></a>        <span class='hs-varid'>input</span>
<a name="line-7158"></a><span class='hs-comment'>{-
<a name="line-7159"></a>attr {
<a name="line-7160"></a>  allowed_values {
<a name="line-7161"></a>    list {
<a name="line-7162"></a>      type: DT_FLOAT
<a name="line-7163"></a>      type: DT_DOUBLE
<a name="line-7164"></a>      type: DT_INT64
<a name="line-7165"></a>      type: DT_INT32
<a name="line-7166"></a>      type: DT_UINT8
<a name="line-7167"></a>      type: DT_UINT16
<a name="line-7168"></a>      type: DT_INT16
<a name="line-7169"></a>      type: DT_INT8
<a name="line-7170"></a>      type: DT_COMPLEX64
<a name="line-7171"></a>      type: DT_COMPLEX128
<a name="line-7172"></a>      type: DT_QINT8
<a name="line-7173"></a>      type: DT_QUINT8
<a name="line-7174"></a>      type: DT_QINT32
<a name="line-7175"></a>      type: DT_HALF
<a name="line-7176"></a>    }
<a name="line-7177"></a>  }
<a name="line-7178"></a>  name: "T"
<a name="line-7179"></a>  type: "type"
<a name="line-7180"></a>}
<a name="line-7181"></a>attr {
<a name="line-7182"></a>  allowed_values {
<a name="line-7183"></a>    list {
<a name="line-7184"></a>      type: DT_FLOAT
<a name="line-7185"></a>      type: DT_DOUBLE
<a name="line-7186"></a>      type: DT_INT64
<a name="line-7187"></a>      type: DT_INT32
<a name="line-7188"></a>      type: DT_UINT8
<a name="line-7189"></a>      type: DT_UINT16
<a name="line-7190"></a>      type: DT_INT16
<a name="line-7191"></a>      type: DT_INT8
<a name="line-7192"></a>      type: DT_COMPLEX64
<a name="line-7193"></a>      type: DT_COMPLEX128
<a name="line-7194"></a>      type: DT_QINT8
<a name="line-7195"></a>      type: DT_QUINT8
<a name="line-7196"></a>      type: DT_QINT32
<a name="line-7197"></a>      type: DT_HALF
<a name="line-7198"></a>    }
<a name="line-7199"></a>  }
<a name="line-7200"></a>  name: "type"
<a name="line-7201"></a>  type: "type"
<a name="line-7202"></a>}
<a name="line-7203"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-7204"></a>output_arg { name: "output" type_attr: "type" }
<a name="line-7205"></a>-}</span>
<a name="line-7206"></a>
<a name="line-7207"></a><a name="sigmoid"></a><span class='hs-comment'>-- | Computes sigmoid of `x` element-wise.</span>
<a name="line-7208"></a><span class='hs-comment'>--</span>
<a name="line-7209"></a><span class='hs-comment'>-- Specifically, `y = 1 / (1 + exp(-x))`.</span>
<a name="line-7210"></a><span class='hs-definition'>sigmoid</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-7211"></a>                                                <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-7212"></a>                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-7213"></a>                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7214"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-7215"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-7216"></a><span class='hs-definition'>sigmoid</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7217"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Sigmoid"</span>
<a name="line-7218"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-7219"></a>        <span class='hs-varid'>x</span>
<a name="line-7220"></a><span class='hs-comment'>{-
<a name="line-7221"></a>attr {
<a name="line-7222"></a>  allowed_values {
<a name="line-7223"></a>    list {
<a name="line-7224"></a>      type: DT_HALF
<a name="line-7225"></a>      type: DT_FLOAT
<a name="line-7226"></a>      type: DT_DOUBLE
<a name="line-7227"></a>      type: DT_COMPLEX64
<a name="line-7228"></a>      type: DT_COMPLEX128
<a name="line-7229"></a>    }
<a name="line-7230"></a>  }
<a name="line-7231"></a>  name: "T"
<a name="line-7232"></a>  type: "type"
<a name="line-7233"></a>}
<a name="line-7234"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-7235"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-7236"></a>-}</span>
<a name="line-7237"></a>
<a name="line-7238"></a><a name="copy"></a><span class='hs-comment'>-- | Copy Op.</span>
<a name="line-7239"></a><span class='hs-comment'>--</span>
<a name="line-7240"></a><span class='hs-comment'>-- Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the</span>
<a name="line-7241"></a><span class='hs-comment'>-- device on which the tensor is allocated.</span>
<a name="line-7242"></a><span class='hs-comment'>-- </span>
<a name="line-7243"></a><span class='hs-comment'>-- Unlike the CopyHost Op, this op does not have HostMemory constraint on its</span>
<a name="line-7244"></a><span class='hs-comment'>-- input or output.</span>
<a name="line-7245"></a><span class='hs-definition'>copy</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7246"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Input tensor.</span>
<a name="line-7247"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Output tensor, deep-copied from input.</span>
<a name="line-7248"></a><span class='hs-definition'>copy</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7249"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Copy"</span>
<a name="line-7250"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-7251"></a>        <span class='hs-varid'>input</span>
<a name="line-7252"></a><span class='hs-comment'>{-
<a name="line-7253"></a>attr { name: "T" type: "type" }
<a name="line-7254"></a>attr {
<a name="line-7255"></a>  default_value { s: "" }
<a name="line-7256"></a>  description: "The name of the input tensor."
<a name="line-7257"></a>  name: "tensor_name"
<a name="line-7258"></a>  type: "string"
<a name="line-7259"></a>}
<a name="line-7260"></a>input_arg {
<a name="line-7261"></a>  description: "Input tensor." name: "input" type_attr: "T"
<a name="line-7262"></a>}
<a name="line-7263"></a>output_arg {
<a name="line-7264"></a>  description: "Output tensor, deep-copied from input."
<a name="line-7265"></a>  name: "output"
<a name="line-7266"></a>  type_attr: "T"
<a name="line-7267"></a>}
<a name="line-7268"></a>-}</span>
<a name="line-7269"></a>
<a name="line-7270"></a><a name="fixedUnigramCandidateSampler"></a><span class='hs-comment'>-- | Generates labels for candidate sampling with a learned unigram distribution.</span>
<a name="line-7271"></a><span class='hs-comment'>--</span>
<a name="line-7272"></a><span class='hs-comment'>-- A unigram sampler could use a fixed unigram distribution read from a</span>
<a name="line-7273"></a><span class='hs-comment'>-- file or passed in as an in-memory array instead of building up the distribution</span>
<a name="line-7274"></a><span class='hs-comment'>-- from data on the fly. There is also an option to skew the distribution by</span>
<a name="line-7275"></a><span class='hs-comment'>-- applying a distortion power to the weights.</span>
<a name="line-7276"></a><span class='hs-comment'>-- </span>
<a name="line-7277"></a><span class='hs-comment'>-- The vocabulary file should be in CSV-like format, with the last field</span>
<a name="line-7278"></a><span class='hs-comment'>-- being the weight associated with the word.</span>
<a name="line-7279"></a><span class='hs-comment'>-- </span>
<a name="line-7280"></a><span class='hs-comment'>-- For each batch, this op picks a single set of sampled candidate labels.</span>
<a name="line-7281"></a><span class='hs-comment'>-- </span>
<a name="line-7282"></a><span class='hs-comment'>-- The advantages of sampling candidates per-batch are simplicity and the</span>
<a name="line-7283"></a><span class='hs-comment'>-- possibility of efficient dense matrix multiplication. The disadvantage is that</span>
<a name="line-7284"></a><span class='hs-comment'>-- the sampled candidates must be chosen independently of the context and of the</span>
<a name="line-7285"></a><span class='hs-comment'>-- true labels.</span>
<a name="line-7286"></a><span class='hs-definition'>fixedUnigramCandidateSampler</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_sampled__: Number of candidates to randomly sample per batch.</span>
<a name="line-7287"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_true__: Number of true labels per context.</span>
<a name="line-7288"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).</span>
<a name="line-7289"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __unique__: If unique is true, we sample with rejection, so that all sampled</span>
<a name="line-7290"></a>                                        <span class='hs-comment'>-- candidates in a batch are unique. This requires some approximation to</span>
<a name="line-7291"></a>                                        <span class='hs-comment'>-- estimate the post-rejection sampling probabilities.</span>
<a name="line-7292"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the</span>
<a name="line-7293"></a>                                                            <span class='hs-comment'>-- IDs of the num_true target_classes in the corresponding original label.</span>
<a name="line-7294"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-7295"></a>                                    <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-7296"></a>                                <span class='hs-comment'>-- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)</span>
<a name="line-7297"></a>                                <span class='hs-comment'>--</span>
<a name="line-7298"></a>                                <span class='hs-comment'>-- * __sampled_candidates__: A vector of length num_sampled, in which each element is</span>
<a name="line-7299"></a>                                <span class='hs-comment'>-- the ID of a sampled candidate.</span>
<a name="line-7300"></a>                                <span class='hs-comment'>--</span>
<a name="line-7301"></a>                                <span class='hs-comment'>-- * __true_expected_count__: A batch_size * num_true matrix, representing</span>
<a name="line-7302"></a>                                <span class='hs-comment'>-- the number of times each candidate is expected to occur in a batch</span>
<a name="line-7303"></a>                                <span class='hs-comment'>-- of sampled candidates. If unique=true, then this is a probability.</span>
<a name="line-7304"></a>                                <span class='hs-comment'>--</span>
<a name="line-7305"></a>                                <span class='hs-comment'>-- * __sampled_expected_count__: A vector of length num_sampled, for each sampled</span>
<a name="line-7306"></a>                                <span class='hs-comment'>-- candidate representing the number of times the candidate is expected</span>
<a name="line-7307"></a>                                <span class='hs-comment'>-- to occur in a batch of sampled candidates.  If unique=true, then this is a</span>
<a name="line-7308"></a>                                <span class='hs-comment'>-- probability.</span>
<a name="line-7309"></a><span class='hs-definition'>fixedUnigramCandidateSampler</span> <span class='hs-varid'>num_sampled</span> <span class='hs-varid'>num_true</span> <span class='hs-varid'>range_max</span> <span class='hs-varid'>unique</span>
<a name="line-7310"></a>                             <span class='hs-varid'>true_classes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7311"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FixedUnigramCandidateSampler"</span>
<a name="line-7312"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_sampled"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_sampled</span>
<a name="line-7313"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_true"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_true</span>
<a name="line-7314"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"range_max"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>range_max</span>
<a name="line-7315"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"unique"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>unique</span><span class='hs-layout'>)</span>
<a name="line-7316"></a>        <span class='hs-varid'>true_classes</span>
<a name="line-7317"></a><span class='hs-comment'>{-
<a name="line-7318"></a>attr {
<a name="line-7319"></a>  description: "Number of true labels per context."
<a name="line-7320"></a>  has_minimum: true
<a name="line-7321"></a>  minimum: 1
<a name="line-7322"></a>  name: "num_true"
<a name="line-7323"></a>  type: "int"
<a name="line-7324"></a>}
<a name="line-7325"></a>attr {
<a name="line-7326"></a>  description: "Number of candidates to randomly sample per batch."
<a name="line-7327"></a>  has_minimum: true
<a name="line-7328"></a>  minimum: 1
<a name="line-7329"></a>  name: "num_sampled"
<a name="line-7330"></a>  type: "int"
<a name="line-7331"></a>}
<a name="line-7332"></a>attr {
<a name="line-7333"></a>  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
<a name="line-7334"></a>  name: "unique"
<a name="line-7335"></a>  type: "bool"
<a name="line-7336"></a>}
<a name="line-7337"></a>attr {
<a name="line-7338"></a>  description: "The sampler will sample integers from the interval [0, range_max)."
<a name="line-7339"></a>  has_minimum: true
<a name="line-7340"></a>  minimum: 1
<a name="line-7341"></a>  name: "range_max"
<a name="line-7342"></a>  type: "int"
<a name="line-7343"></a>}
<a name="line-7344"></a>attr {
<a name="line-7345"></a>  default_value { s: "" }
<a name="line-7346"></a>  description: "Each valid line in this file (which should have a CSV-like format)\ncorresponds to a valid word ID. IDs are in sequential order, starting from\nnum_reserved_ids. The last entry in each line is expected to be a value\ncorresponding to the count or relative probability. Exactly one of vocab_file\nand unigrams needs to be passed to this op."
<a name="line-7347"></a>  name: "vocab_file"
<a name="line-7348"></a>  type: "string"
<a name="line-7349"></a>}
<a name="line-7350"></a>attr {
<a name="line-7351"></a>  default_value { f: 1.0 }
<a name="line-7352"></a>  description: "The distortion is used to skew the unigram probability distribution.\nEach weight is first raised to the distortion\'s power before adding to the\ninternal unigram distribution. As a result, distortion = 1.0 gives regular\nunigram sampling (as defined by the vocab file), and distortion = 0.0 gives\na uniform distribution."
<a name="line-7353"></a>  name: "distortion"
<a name="line-7354"></a>  type: "float"
<a name="line-7355"></a>}
<a name="line-7356"></a>attr {
<a name="line-7357"></a>  default_value { i: 0 }
<a name="line-7358"></a>  description: "Optionally some reserved IDs can be added in the range [0,\n..., num_reserved_ids) by the users. One use case is that a special unknown\nword token is used as ID 0. These IDs will have a sampling probability of 0."
<a name="line-7359"></a>  name: "num_reserved_ids"
<a name="line-7360"></a>  type: "int"
<a name="line-7361"></a>}
<a name="line-7362"></a>attr {
<a name="line-7363"></a>  default_value { i: 1 }
<a name="line-7364"></a>  description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'shard\') indicates the number of partitions that are being\nused in the overall computation."
<a name="line-7365"></a>  has_minimum: true
<a name="line-7366"></a>  minimum: 1
<a name="line-7367"></a>  name: "num_shards"
<a name="line-7368"></a>  type: "int"
<a name="line-7369"></a>}
<a name="line-7370"></a>attr {
<a name="line-7371"></a>  default_value { i: 0 }
<a name="line-7372"></a>  description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'num_shards\') indicates the particular partition number of a\nsampler op, when partitioning is being used."
<a name="line-7373"></a>  has_minimum: true
<a name="line-7374"></a>  name: "shard"
<a name="line-7375"></a>  type: "int"
<a name="line-7376"></a>}
<a name="line-7377"></a>attr {
<a name="line-7378"></a>  default_value { list { } }
<a name="line-7379"></a>  description: "A list of unigram counts or probabilities, one per ID in sequential\norder. Exactly one of vocab_file and unigrams should be passed to this op."
<a name="line-7380"></a>  name: "unigrams"
<a name="line-7381"></a>  type: "list(float)"
<a name="line-7382"></a>}
<a name="line-7383"></a>attr {
<a name="line-7384"></a>  default_value { i: 0 }
<a name="line-7385"></a>  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-7386"></a>  name: "seed"
<a name="line-7387"></a>  type: "int"
<a name="line-7388"></a>}
<a name="line-7389"></a>attr {
<a name="line-7390"></a>  default_value { i: 0 }
<a name="line-7391"></a>  description: "An second seed to avoid seed collision."
<a name="line-7392"></a>  name: "seed2"
<a name="line-7393"></a>  type: "int"
<a name="line-7394"></a>}
<a name="line-7395"></a>input_arg {
<a name="line-7396"></a>  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
<a name="line-7397"></a>  name: "true_classes"
<a name="line-7398"></a>  type: DT_INT64
<a name="line-7399"></a>}
<a name="line-7400"></a>output_arg {
<a name="line-7401"></a>  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
<a name="line-7402"></a>  name: "sampled_candidates"
<a name="line-7403"></a>  type: DT_INT64
<a name="line-7404"></a>}
<a name="line-7405"></a>output_arg {
<a name="line-7406"></a>  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
<a name="line-7407"></a>  name: "true_expected_count"
<a name="line-7408"></a>  type: DT_FLOAT
<a name="line-7409"></a>}
<a name="line-7410"></a>output_arg {
<a name="line-7411"></a>  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
<a name="line-7412"></a>  name: "sampled_expected_count"
<a name="line-7413"></a>  type: DT_FLOAT
<a name="line-7414"></a>}
<a name="line-7415"></a>-}</span>
<a name="line-7416"></a>
<a name="line-7417"></a><a name="listDiff"></a><span class='hs-comment'>-- | Computes the difference between two lists of numbers or strings.</span>
<a name="line-7418"></a><span class='hs-comment'>--</span>
<a name="line-7419"></a><span class='hs-comment'>-- Given a list `x` and a list `y`, this operation returns a list `out` that</span>
<a name="line-7420"></a><span class='hs-comment'>-- represents all values that are in `x` but not in `y`. The returned list `out`</span>
<a name="line-7421"></a><span class='hs-comment'>-- is sorted in the same order that the numbers appear in `x` (duplicates are</span>
<a name="line-7422"></a><span class='hs-comment'>-- preserved). This operation also returns a list `idx` that represents the</span>
<a name="line-7423"></a><span class='hs-comment'>-- position of each `out` element in `x`. In other words:</span>
<a name="line-7424"></a><span class='hs-comment'>-- </span>
<a name="line-7425"></a><span class='hs-comment'>-- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`</span>
<a name="line-7426"></a><span class='hs-comment'>-- </span>
<a name="line-7427"></a><span class='hs-comment'>-- For example, given this input:</span>
<a name="line-7428"></a><span class='hs-comment'>-- </span>
<a name="line-7429"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7430"></a><span class='hs-comment'>-- x = [1, 2, 3, 4, 5, 6]</span>
<a name="line-7431"></a><span class='hs-comment'>-- y = [1, 3, 5]</span>
<a name="line-7432"></a><span class='hs-comment'>-- ```</span>
<a name="line-7433"></a><span class='hs-comment'>-- </span>
<a name="line-7434"></a><span class='hs-comment'>-- This operation would return:</span>
<a name="line-7435"></a><span class='hs-comment'>-- </span>
<a name="line-7436"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7437"></a><span class='hs-comment'>-- out ==&gt; [2, 4, 6]</span>
<a name="line-7438"></a><span class='hs-comment'>-- idx ==&gt; [1, 3, 5]</span>
<a name="line-7439"></a><span class='hs-comment'>-- ```</span>
<a name="line-7440"></a><span class='hs-definition'>listDiff</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>out_idx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>,</span>
<a name="line-7441"></a>                                      <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-7442"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7443"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__: 1-D. Values to keep.</span>
<a name="line-7444"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__: 1-D. Values to remove.</span>
<a name="line-7445"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ (__out__, __idx__)</span>
<a name="line-7446"></a>            <span class='hs-comment'>--</span>
<a name="line-7447"></a>            <span class='hs-comment'>-- * __out__: 1-D. Values present in `x` but not in `y`.</span>
<a name="line-7448"></a>            <span class='hs-comment'>--</span>
<a name="line-7449"></a>            <span class='hs-comment'>-- * __idx__: 1-D. Positions of `x` values preserved in `out`.</span>
<a name="line-7450"></a><span class='hs-definition'>listDiff</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7451"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ListDiff"</span>
<a name="line-7452"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-7453"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_idx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-7454"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-7455"></a><span class='hs-comment'>{-
<a name="line-7456"></a>attr { name: "T" type: "type" }
<a name="line-7457"></a>attr {
<a name="line-7458"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-7459"></a>  default_value { type: DT_INT32 }
<a name="line-7460"></a>  name: "out_idx"
<a name="line-7461"></a>  type: "type"
<a name="line-7462"></a>}
<a name="line-7463"></a>input_arg {
<a name="line-7464"></a>  description: "1-D. Values to keep." name: "x" type_attr: "T"
<a name="line-7465"></a>}
<a name="line-7466"></a>input_arg {
<a name="line-7467"></a>  description: "1-D. Values to remove." name: "y" type_attr: "T"
<a name="line-7468"></a>}
<a name="line-7469"></a>output_arg {
<a name="line-7470"></a>  description: "1-D. Values present in `x` but not in `y`."
<a name="line-7471"></a>  name: "out"
<a name="line-7472"></a>  type_attr: "T"
<a name="line-7473"></a>}
<a name="line-7474"></a>output_arg {
<a name="line-7475"></a>  description: "1-D. Positions of `x` values preserved in `out`."
<a name="line-7476"></a>  name: "idx"
<a name="line-7477"></a>  type_attr: "out_idx"
<a name="line-7478"></a>}
<a name="line-7479"></a>-}</span>
<a name="line-7480"></a>
<a name="line-7481"></a><span class='hs-comment'>-- | Extract `patches` from `images` and put them in the "depth" output dimension.</span>
<a name="line-7482"></a>
<a name="line-7483"></a><a name="extractImagePatches"></a><span class='hs-definition'>extractImagePatches</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-7484"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-7485"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-7486"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-7487"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-7488"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-7489"></a>                                                            <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7490"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __images__: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.</span>
<a name="line-7491"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __patches__: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *</span>
<a name="line-7492"></a>                       <span class='hs-comment'>-- ksize_cols * depth]` containing image patches with size</span>
<a name="line-7493"></a>                       <span class='hs-comment'>-- `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension.</span>
<a name="line-7494"></a><span class='hs-definition'>extractImagePatches</span> <span class='hs-varid'>images</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7495"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ExtractImagePatches"</span>
<a name="line-7496"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-7497"></a>        <span class='hs-varid'>images</span>
<a name="line-7498"></a><span class='hs-comment'>{-
<a name="line-7499"></a>attr {
<a name="line-7500"></a>  description: "The size of the sliding window for each dimension of `images`."
<a name="line-7501"></a>  has_minimum: true
<a name="line-7502"></a>  minimum: 4
<a name="line-7503"></a>  name: "ksizes"
<a name="line-7504"></a>  type: "list(int)"
<a name="line-7505"></a>}
<a name="line-7506"></a>attr {
<a name="line-7507"></a>  description: "1-D of length 4. How far the centers of two consecutive patches are in\nthe images. Must be: `[1, stride_rows, stride_cols, 1]`."
<a name="line-7508"></a>  has_minimum: true
<a name="line-7509"></a>  minimum: 4
<a name="line-7510"></a>  name: "strides"
<a name="line-7511"></a>  type: "list(int)"
<a name="line-7512"></a>}
<a name="line-7513"></a>attr {
<a name="line-7514"></a>  description: "1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the\ninput stride, specifying how far two consecutive patch samples are in the\ninput. Equivalent to extracting patches with\n`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by\nsubsampling them spatially by a factor of `rates`."
<a name="line-7515"></a>  has_minimum: true
<a name="line-7516"></a>  minimum: 4
<a name="line-7517"></a>  name: "rates"
<a name="line-7518"></a>  type: "list(int)"
<a name="line-7519"></a>}
<a name="line-7520"></a>attr {
<a name="line-7521"></a>  allowed_values {
<a name="line-7522"></a>    list {
<a name="line-7523"></a>      type: DT_FLOAT
<a name="line-7524"></a>      type: DT_DOUBLE
<a name="line-7525"></a>      type: DT_INT32
<a name="line-7526"></a>      type: DT_INT64
<a name="line-7527"></a>      type: DT_UINT8
<a name="line-7528"></a>      type: DT_INT16
<a name="line-7529"></a>      type: DT_INT8
<a name="line-7530"></a>      type: DT_UINT16
<a name="line-7531"></a>      type: DT_HALF
<a name="line-7532"></a>    }
<a name="line-7533"></a>  }
<a name="line-7534"></a>  name: "T"
<a name="line-7535"></a>  type: "type"
<a name="line-7536"></a>}
<a name="line-7537"></a>attr {
<a name="line-7538"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-7539"></a>  description: "The type of padding algorithm to use.\n\nWe specify the size-related attributes as:\n\n```python\n      ksizes = [1, ksize_rows, ksize_cols, 1]\n      strides = [1, strides_rows, strides_cols, 1]\n      rates = [1, rates_rows, rates_cols, 1]\n```"
<a name="line-7540"></a>  name: "padding"
<a name="line-7541"></a>  type: "string"
<a name="line-7542"></a>}
<a name="line-7543"></a>input_arg {
<a name="line-7544"></a>  description: "4-D Tensor with shape `[batch, in_rows, in_cols, depth]`."
<a name="line-7545"></a>  name: "images"
<a name="line-7546"></a>  type_attr: "T"
<a name="line-7547"></a>}
<a name="line-7548"></a>output_arg {
<a name="line-7549"></a>  description: "4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *\nksize_cols * depth]` containing image patches with size\n`ksize_rows x ksize_cols x depth` vectorized in the \"depth\" dimension."
<a name="line-7550"></a>  name: "patches"
<a name="line-7551"></a>  type_attr: "T"
<a name="line-7552"></a>}
<a name="line-7553"></a>-}</span>
<a name="line-7554"></a>
<a name="line-7555"></a><a name="spaceToDepth"></a><span class='hs-comment'>-- | SpaceToDepth for tensors of type T.</span>
<a name="line-7556"></a><span class='hs-comment'>--</span>
<a name="line-7557"></a><span class='hs-comment'>-- Rearranges blocks of spatial data, into depth. More specifically,</span>
<a name="line-7558"></a><span class='hs-comment'>-- this op outputs a copy of the input tensor where values from the `height`</span>
<a name="line-7559"></a><span class='hs-comment'>-- and `width` dimensions are moved to the `depth` dimension.</span>
<a name="line-7560"></a><span class='hs-comment'>-- The attr `block_size` indicates the input block size and how the data is moved.</span>
<a name="line-7561"></a><span class='hs-comment'>-- </span>
<a name="line-7562"></a><span class='hs-comment'>--   * Non-overlapping blocks of size `block_size x block size` are rearranged</span>
<a name="line-7563"></a><span class='hs-comment'>--     into depth at each location.</span>
<a name="line-7564"></a><span class='hs-comment'>--   * The depth of the output tensor is `input_depth * block_size * block_size`.</span>
<a name="line-7565"></a><span class='hs-comment'>--   * The input tensor's height and width must be divisible by block_size.</span>
<a name="line-7566"></a><span class='hs-comment'>-- </span>
<a name="line-7567"></a><span class='hs-comment'>-- That is, assuming the input is in the shape:</span>
<a name="line-7568"></a><span class='hs-comment'>-- `[batch, height, width, depth]`,</span>
<a name="line-7569"></a><span class='hs-comment'>-- the shape of the output will be:</span>
<a name="line-7570"></a><span class='hs-comment'>-- `[batch, height/block_size, width/block_size, depth*block_size*block_size]`</span>
<a name="line-7571"></a><span class='hs-comment'>-- </span>
<a name="line-7572"></a><span class='hs-comment'>-- This operation requires that the input tensor be of rank 4, and that</span>
<a name="line-7573"></a><span class='hs-comment'>-- `block_size` be &gt;=1 and a divisor of both the input `height` and `width`.</span>
<a name="line-7574"></a><span class='hs-comment'>-- </span>
<a name="line-7575"></a><span class='hs-comment'>-- This operation is useful for resizing the activations between convolutions</span>
<a name="line-7576"></a><span class='hs-comment'>-- (but keeping all data), e.g. instead of pooling. It is also useful for training</span>
<a name="line-7577"></a><span class='hs-comment'>-- purely convolutional models.</span>
<a name="line-7578"></a><span class='hs-comment'>-- </span>
<a name="line-7579"></a><span class='hs-comment'>-- For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:</span>
<a name="line-7580"></a><span class='hs-comment'>-- </span>
<a name="line-7581"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7582"></a><span class='hs-comment'>-- x = [[[[1], [2]],</span>
<a name="line-7583"></a><span class='hs-comment'>--       [[3], [4]]]]</span>
<a name="line-7584"></a><span class='hs-comment'>-- ```</span>
<a name="line-7585"></a><span class='hs-comment'>-- </span>
<a name="line-7586"></a><span class='hs-comment'>-- This operation will output a tensor of shape `[1, 1, 1, 4]`:</span>
<a name="line-7587"></a><span class='hs-comment'>-- </span>
<a name="line-7588"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7589"></a><span class='hs-comment'>-- [[[[1, 2, 3, 4]]]]</span>
<a name="line-7590"></a><span class='hs-comment'>-- ```</span>
<a name="line-7591"></a><span class='hs-comment'>-- </span>
<a name="line-7592"></a><span class='hs-comment'>-- Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,</span>
<a name="line-7593"></a><span class='hs-comment'>-- the corresponding output will have a single element (i.e. width and height are</span>
<a name="line-7594"></a><span class='hs-comment'>-- both 1) and will have a depth of 4 channels (1 * block_size * block_size).</span>
<a name="line-7595"></a><span class='hs-comment'>-- The output element shape is `[1, 1, 4]`.</span>
<a name="line-7596"></a><span class='hs-comment'>-- </span>
<a name="line-7597"></a><span class='hs-comment'>-- For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.</span>
<a name="line-7598"></a><span class='hs-comment'>-- </span>
<a name="line-7599"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7600"></a><span class='hs-comment'>-- x = [[[[1, 2, 3], [4, 5, 6]],</span>
<a name="line-7601"></a><span class='hs-comment'>--       [[7, 8, 9], [10, 11, 12]]]]</span>
<a name="line-7602"></a><span class='hs-comment'>-- ```</span>
<a name="line-7603"></a><span class='hs-comment'>-- </span>
<a name="line-7604"></a><span class='hs-comment'>-- This operation, for block_size of 2, will return the following tensor of shape</span>
<a name="line-7605"></a><span class='hs-comment'>-- `[1, 1, 1, 12]`</span>
<a name="line-7606"></a><span class='hs-comment'>-- </span>
<a name="line-7607"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7608"></a><span class='hs-comment'>-- [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]</span>
<a name="line-7609"></a><span class='hs-comment'>-- ```</span>
<a name="line-7610"></a><span class='hs-comment'>-- </span>
<a name="line-7611"></a><span class='hs-comment'>-- Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:</span>
<a name="line-7612"></a><span class='hs-comment'>-- </span>
<a name="line-7613"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7614"></a><span class='hs-comment'>-- x = [[[[1],   [2],  [5],  [6]],</span>
<a name="line-7615"></a><span class='hs-comment'>--       [[3],   [4],  [7],  [8]],</span>
<a name="line-7616"></a><span class='hs-comment'>--       [[9],  [10], [13],  [14]],</span>
<a name="line-7617"></a><span class='hs-comment'>--       [[11], [12], [15],  [16]]]]</span>
<a name="line-7618"></a><span class='hs-comment'>-- ```</span>
<a name="line-7619"></a><span class='hs-comment'>-- </span>
<a name="line-7620"></a><span class='hs-comment'>-- the operator will return the following tensor of shape `[1 2 2 4]`:</span>
<a name="line-7621"></a><span class='hs-comment'>-- </span>
<a name="line-7622"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7623"></a><span class='hs-comment'>-- x = [[[[1, 2, 3, 4],</span>
<a name="line-7624"></a><span class='hs-comment'>--        [5, 6, 7, 8]],</span>
<a name="line-7625"></a><span class='hs-comment'>--       [[9, 10, 11, 12],</span>
<a name="line-7626"></a><span class='hs-comment'>--        [13, 14, 15, 16]]]]</span>
<a name="line-7627"></a><span class='hs-comment'>-- ```</span>
<a name="line-7628"></a><span class='hs-definition'>spaceToDepth</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7629"></a>                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __block_size__: The size of the spatial block.</span>
<a name="line-7630"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-7631"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-7632"></a><span class='hs-definition'>spaceToDepth</span> <span class='hs-varid'>block_size</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7633"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SpaceToDepth"</span>
<a name="line-7634"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-7635"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"block_size"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>block_size</span><span class='hs-layout'>)</span>
<a name="line-7636"></a>        <span class='hs-varid'>input</span>
<a name="line-7637"></a><span class='hs-comment'>{-
<a name="line-7638"></a>attr { name: "T" type: "type" }
<a name="line-7639"></a>attr {
<a name="line-7640"></a>  description: "The size of the spatial block."
<a name="line-7641"></a>  has_minimum: true
<a name="line-7642"></a>  minimum: 2
<a name="line-7643"></a>  name: "block_size"
<a name="line-7644"></a>  type: "int"
<a name="line-7645"></a>}
<a name="line-7646"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-7647"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-7648"></a>-}</span>
<a name="line-7649"></a>
<a name="line-7650"></a><span class='hs-comment'>-- | Computes the gradient of the crop_and_resize op wrt the input boxes tensor.</span>
<a name="line-7651"></a>
<a name="line-7652"></a><a name="cropAndResizeGradBoxes"></a><span class='hs-definition'>cropAndResizeGradBoxes</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-7653"></a>                                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-7654"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-7655"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-7656"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-7657"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-7658"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-7659"></a>                                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7660"></a>                          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.</span>
<a name="line-7661"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.</span>
<a name="line-7662"></a>                                         <span class='hs-comment'>-- Both `image_height` and `image_width` need to be positive.</span>
<a name="line-7663"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor</span>
<a name="line-7664"></a>                                             <span class='hs-comment'>-- specifies the coordinates of a box in the `box_ind[i]` image and is specified</span>
<a name="line-7665"></a>                                             <span class='hs-comment'>-- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of</span>
<a name="line-7666"></a>                                             <span class='hs-comment'>-- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the</span>
<a name="line-7667"></a>                                             <span class='hs-comment'>-- `[0, 1]` interval of normalized image height is mapped to</span>
<a name="line-7668"></a>                                             <span class='hs-comment'>-- `[0, image_height - 1] in image height coordinates. We do allow y1 &gt; y2, in</span>
<a name="line-7669"></a>                                             <span class='hs-comment'>-- which case the sampled crop is an up-down flipped version of the original</span>
<a name="line-7670"></a>                                             <span class='hs-comment'>-- image. The width dimension is treated similarly. Normalized coordinates</span>
<a name="line-7671"></a>                                             <span class='hs-comment'>-- outside the `[0, 1]` range are allowed, in which case we use</span>
<a name="line-7672"></a>                                             <span class='hs-comment'>-- `extrapolation_value` to extrapolate the input image values.</span>
<a name="line-7673"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.</span>
<a name="line-7674"></a>                                                      <span class='hs-comment'>-- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.</span>
<a name="line-7675"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __output__: A 2-D tensor of shape `[num_boxes, 4]`.</span>
<a name="line-7676"></a><span class='hs-definition'>cropAndResizeGradBoxes</span> <span class='hs-varid'>grads</span> <span class='hs-varid'>image</span> <span class='hs-varid'>boxes</span> <span class='hs-varid'>box_ind</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7677"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CropAndResizeGradBoxes"</span>
<a name="line-7678"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-7679"></a>        <span class='hs-varid'>grads</span> <span class='hs-varid'>image</span> <span class='hs-varid'>boxes</span> <span class='hs-varid'>box_ind</span>
<a name="line-7680"></a><span class='hs-comment'>{-
<a name="line-7681"></a>attr {
<a name="line-7682"></a>  allowed_values {
<a name="line-7683"></a>    list {
<a name="line-7684"></a>      type: DT_UINT8
<a name="line-7685"></a>      type: DT_INT8
<a name="line-7686"></a>      type: DT_INT16
<a name="line-7687"></a>      type: DT_INT32
<a name="line-7688"></a>      type: DT_INT64
<a name="line-7689"></a>      type: DT_HALF
<a name="line-7690"></a>      type: DT_FLOAT
<a name="line-7691"></a>      type: DT_DOUBLE
<a name="line-7692"></a>    }
<a name="line-7693"></a>  }
<a name="line-7694"></a>  name: "T"
<a name="line-7695"></a>  type: "type"
<a name="line-7696"></a>}
<a name="line-7697"></a>attr {
<a name="line-7698"></a>  allowed_values { list { s: "bilinear" } }
<a name="line-7699"></a>  default_value { s: "bilinear" }
<a name="line-7700"></a>  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
<a name="line-7701"></a>  name: "method"
<a name="line-7702"></a>  type: "string"
<a name="line-7703"></a>}
<a name="line-7704"></a>input_arg {
<a name="line-7705"></a>  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
<a name="line-7706"></a>  name: "grads"
<a name="line-7707"></a>  type: DT_FLOAT
<a name="line-7708"></a>}
<a name="line-7709"></a>input_arg {
<a name="line-7710"></a>  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive."
<a name="line-7711"></a>  name: "image"
<a name="line-7712"></a>  type_attr: "T"
<a name="line-7713"></a>}
<a name="line-7714"></a>input_arg {
<a name="line-7715"></a>  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 &gt; y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
<a name="line-7716"></a>  name: "boxes"
<a name="line-7717"></a>  type: DT_FLOAT
<a name="line-7718"></a>}
<a name="line-7719"></a>input_arg {
<a name="line-7720"></a>  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
<a name="line-7721"></a>  name: "box_ind"
<a name="line-7722"></a>  type: DT_INT32
<a name="line-7723"></a>}
<a name="line-7724"></a>output_arg {
<a name="line-7725"></a>  description: "A 2-D tensor of shape `[num_boxes, 4]`."
<a name="line-7726"></a>  name: "output"
<a name="line-7727"></a>  type: DT_FLOAT
<a name="line-7728"></a>}
<a name="line-7729"></a>-}</span>
<a name="line-7730"></a>
<a name="line-7731"></a><a name="batchToSpaceND"></a><span class='hs-comment'>-- | BatchToSpace for N-D tensors of type T.</span>
<a name="line-7732"></a><span class='hs-comment'>--</span>
<a name="line-7733"></a><span class='hs-comment'>-- This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape</span>
<a name="line-7734"></a><span class='hs-comment'>-- `block_shape + [batch]`, interleaves these blocks back into the grid defined by</span>
<a name="line-7735"></a><span class='hs-comment'>-- the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as</span>
<a name="line-7736"></a><span class='hs-comment'>-- the input.  The spatial dimensions of this intermediate result are then</span>
<a name="line-7737"></a><span class='hs-comment'>-- optionally cropped according to `crops` to produce the output.  This is the</span>
<a name="line-7738"></a><span class='hs-comment'>-- reverse of SpaceToBatch.  See below for a precise description.</span>
<a name="line-7739"></a><span class='hs-definition'>batchToSpaceND</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tblock_shape</span> <span class='hs-varid'>tcrops</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-7740"></a>                                                           <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tblock_shape</span><span class='hs-layout'>,</span>
<a name="line-7741"></a>                                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-7742"></a>                                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tblock_shape</span><span class='hs-layout'>,</span>
<a name="line-7743"></a>                                                           <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tcrops</span><span class='hs-layout'>,</span>
<a name="line-7744"></a>                                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-7745"></a>                                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tcrops</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7746"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,</span>
<a name="line-7747"></a>                              <span class='hs-comment'>-- where spatial_shape has M dimensions.</span>
<a name="line-7748"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tblock_shape</span> <span class='hs-comment'>-- ^ __block_shape__: 1-D with shape `[M]`, all values must be &gt;= 1.</span>
<a name="line-7749"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tcrops</span> <span class='hs-comment'>-- ^ __crops__: 2-D with shape `[M, 2]`, all values must be &gt;= 0.</span>
<a name="line-7750"></a>                                      <span class='hs-comment'>--   `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input</span>
<a name="line-7751"></a>                                      <span class='hs-comment'>--   dimension `i + 1`, which corresponds to spatial dimension `i`.  It is</span>
<a name="line-7752"></a>                                      <span class='hs-comment'>--   required that</span>
<a name="line-7753"></a>                                      <span class='hs-comment'>--   `crop_start[i] + crop_end[i] &lt;= block_shape[i] * input_shape[i + 1]`.</span>
<a name="line-7754"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7755"></a>                                      <span class='hs-comment'>-- This operation is equivalent to the following steps:</span>
<a name="line-7756"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7757"></a>                                      <span class='hs-comment'>-- 1. Reshape `input` to `reshaped` of shape:</span>
<a name="line-7758"></a>                                      <span class='hs-comment'>--      [block_shape[0], ..., block_shape[M-1],</span>
<a name="line-7759"></a>                                      <span class='hs-comment'>--       batch / prod(block_shape),</span>
<a name="line-7760"></a>                                      <span class='hs-comment'>--       input_shape[1], ..., input_shape[N-1]]</span>
<a name="line-7761"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7762"></a>                                      <span class='hs-comment'>-- 2. Permute dimensions of `reshaped` to produce `permuted` of shape</span>
<a name="line-7763"></a>                                      <span class='hs-comment'>--      [batch / prod(block_shape),</span>
<a name="line-7764"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7765"></a>                                      <span class='hs-comment'>--       input_shape[1], block_shape[0],</span>
<a name="line-7766"></a>                                      <span class='hs-comment'>--       ...,</span>
<a name="line-7767"></a>                                      <span class='hs-comment'>--       input_shape[M], block_shape[M-1],</span>
<a name="line-7768"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7769"></a>                                      <span class='hs-comment'>--       input_shape[M+1], ..., input_shape[N-1]]</span>
<a name="line-7770"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7771"></a>                                      <span class='hs-comment'>-- 3. Reshape `permuted` to produce `reshaped_permuted` of shape</span>
<a name="line-7772"></a>                                      <span class='hs-comment'>--      [batch / prod(block_shape),</span>
<a name="line-7773"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7774"></a>                                      <span class='hs-comment'>--       input_shape[1] * block_shape[0],</span>
<a name="line-7775"></a>                                      <span class='hs-comment'>--       ...,</span>
<a name="line-7776"></a>                                      <span class='hs-comment'>--       input_shape[M] * block_shape[M-1],</span>
<a name="line-7777"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7778"></a>                                      <span class='hs-comment'>--       input_shape[M+1],</span>
<a name="line-7779"></a>                                      <span class='hs-comment'>--       ...,</span>
<a name="line-7780"></a>                                      <span class='hs-comment'>--       input_shape[N-1]]</span>
<a name="line-7781"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7782"></a>                                      <span class='hs-comment'>-- 4. Crop the start and end of dimensions `[1, ..., M]` of</span>
<a name="line-7783"></a>                                      <span class='hs-comment'>--    `reshaped_permuted` according to `crops` to produce the output of shape:</span>
<a name="line-7784"></a>                                      <span class='hs-comment'>--      [batch / prod(block_shape),</span>
<a name="line-7785"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7786"></a>                                      <span class='hs-comment'>--       input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],</span>
<a name="line-7787"></a>                                      <span class='hs-comment'>--       ...,</span>
<a name="line-7788"></a>                                      <span class='hs-comment'>--       input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],</span>
<a name="line-7789"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7790"></a>                                      <span class='hs-comment'>--       input_shape[M+1], ..., input_shape[N-1]]</span>
<a name="line-7791"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7792"></a>                                      <span class='hs-comment'>-- Some examples:</span>
<a name="line-7793"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7794"></a>                                      <span class='hs-comment'>-- (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and</span>
<a name="line-7795"></a>                                      <span class='hs-comment'>--     `crops = [[0, 0], [0, 0]]`:</span>
<a name="line-7796"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7797"></a>                                      <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7798"></a>                                      <span class='hs-comment'>-- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]</span>
<a name="line-7799"></a>                                      <span class='hs-comment'>-- ```</span>
<a name="line-7800"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7801"></a>                                      <span class='hs-comment'>-- The output tensor has shape `[1, 2, 2, 1]` and value:</span>
<a name="line-7802"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7803"></a>                                      <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7804"></a>                                      <span class='hs-comment'>-- x = [[[[1], [2]], [[3], [4]]]]</span>
<a name="line-7805"></a>                                      <span class='hs-comment'>-- ```</span>
<a name="line-7806"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7807"></a>                                      <span class='hs-comment'>-- (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and</span>
<a name="line-7808"></a>                                      <span class='hs-comment'>--     `crops = [[0, 0], [0, 0]]`:</span>
<a name="line-7809"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7810"></a>                                      <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7811"></a>                                      <span class='hs-comment'>-- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]</span>
<a name="line-7812"></a>                                      <span class='hs-comment'>-- ```</span>
<a name="line-7813"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7814"></a>                                      <span class='hs-comment'>-- The output tensor has shape `[1, 2, 2, 3]` and value:</span>
<a name="line-7815"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7816"></a>                                      <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7817"></a>                                      <span class='hs-comment'>-- x = [[[[1, 2, 3], [4, 5, 6]],</span>
<a name="line-7818"></a>                                      <span class='hs-comment'>--       [[7, 8, 9], [10, 11, 12]]]]</span>
<a name="line-7819"></a>                                      <span class='hs-comment'>-- ```</span>
<a name="line-7820"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7821"></a>                                      <span class='hs-comment'>-- (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and</span>
<a name="line-7822"></a>                                      <span class='hs-comment'>--     `crops = [[0, 0], [0, 0]]`:</span>
<a name="line-7823"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7824"></a>                                      <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7825"></a>                                      <span class='hs-comment'>-- x = [[[[1], [3]], [[5], [7]]],</span>
<a name="line-7826"></a>                                      <span class='hs-comment'>--      [[[2], [4]], [[10], [12]]],</span>
<a name="line-7827"></a>                                      <span class='hs-comment'>--      [[[5], [7]], [[13], [15]]],</span>
<a name="line-7828"></a>                                      <span class='hs-comment'>--      [[[6], [8]], [[14], [16]]]]</span>
<a name="line-7829"></a>                                      <span class='hs-comment'>-- ```</span>
<a name="line-7830"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7831"></a>                                      <span class='hs-comment'>-- The output tensor has shape `[1, 4, 4, 1]` and value:</span>
<a name="line-7832"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7833"></a>                                      <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7834"></a>                                      <span class='hs-comment'>-- x = [[[1],   [2],  [3],  [4]],</span>
<a name="line-7835"></a>                                      <span class='hs-comment'>--      [[5],   [6],  [7],  [8]],</span>
<a name="line-7836"></a>                                      <span class='hs-comment'>--      [[9],  [10], [11],  [12]],</span>
<a name="line-7837"></a>                                      <span class='hs-comment'>--      [[13], [14], [15],  [16]]]</span>
<a name="line-7838"></a>                                      <span class='hs-comment'>-- ```</span>
<a name="line-7839"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7840"></a>                                      <span class='hs-comment'>-- (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and</span>
<a name="line-7841"></a>                                      <span class='hs-comment'>--     `crops = [[0, 0], [2, 0]]`:</span>
<a name="line-7842"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7843"></a>                                      <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7844"></a>                                      <span class='hs-comment'>-- x = [[[[0], [1], [3]]], [[[0], [9], [11]]],</span>
<a name="line-7845"></a>                                      <span class='hs-comment'>--      [[[0], [2], [4]]], [[[0], [10], [12]]],</span>
<a name="line-7846"></a>                                      <span class='hs-comment'>--      [[[0], [5], [7]]], [[[0], [13], [15]]],</span>
<a name="line-7847"></a>                                      <span class='hs-comment'>--      [[[0], [6], [8]]], [[[0], [14], [16]]]]</span>
<a name="line-7848"></a>                                      <span class='hs-comment'>-- ```</span>
<a name="line-7849"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7850"></a>                                      <span class='hs-comment'>-- The output tensor has shape `[2, 2, 4, 1]` and value:</span>
<a name="line-7851"></a>                                      <span class='hs-comment'>-- </span>
<a name="line-7852"></a>                                      <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7853"></a>                                      <span class='hs-comment'>-- x = [[[[1],   [2],  [3],  [4]],</span>
<a name="line-7854"></a>                                      <span class='hs-comment'>--       [[5],   [6],  [7],  [8]]],</span>
<a name="line-7855"></a>                                      <span class='hs-comment'>--      [[[9],  [10], [11],  [12]],</span>
<a name="line-7856"></a>                                      <span class='hs-comment'>--       [[13], [14], [15],  [16]]]]</span>
<a name="line-7857"></a>                                      <span class='hs-comment'>-- ```</span>
<a name="line-7858"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-7859"></a><span class='hs-definition'>batchToSpaceND</span> <span class='hs-varid'>input</span> <span class='hs-varid'>block_shape</span> <span class='hs-varid'>crops</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7860"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchToSpaceND"</span>
<a name="line-7861"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-7862"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tblock_shape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tblock_shape</span><span class='hs-layout'>)</span>
<a name="line-7863"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tcrops"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tcrops</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-7864"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>block_shape</span> <span class='hs-varid'>crops</span>
<a name="line-7865"></a><span class='hs-comment'>{-
<a name="line-7866"></a>attr { name: "T" type: "type" }
<a name="line-7867"></a>attr {
<a name="line-7868"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-7869"></a>  default_value { type: DT_INT32 }
<a name="line-7870"></a>  name: "Tblock_shape"
<a name="line-7871"></a>  type: "type"
<a name="line-7872"></a>}
<a name="line-7873"></a>attr {
<a name="line-7874"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-7875"></a>  default_value { type: DT_INT32 }
<a name="line-7876"></a>  name: "Tcrops"
<a name="line-7877"></a>  type: "type"
<a name="line-7878"></a>}
<a name="line-7879"></a>input_arg {
<a name="line-7880"></a>  description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has M dimensions."
<a name="line-7881"></a>  name: "input"
<a name="line-7882"></a>  type_attr: "T"
<a name="line-7883"></a>}
<a name="line-7884"></a>input_arg {
<a name="line-7885"></a>  description: "1-D with shape `[M]`, all values must be &gt;= 1."
<a name="line-7886"></a>  name: "block_shape"
<a name="line-7887"></a>  type_attr: "Tblock_shape"
<a name="line-7888"></a>}
<a name="line-7889"></a>input_arg {
<a name="line-7890"></a>  description: "2-D with shape `[M, 2]`, all values must be &gt;= 0.\n  `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input\n  dimension `i + 1`, which corresponds to spatial dimension `i`.  It is\n  required that\n  `crop_start[i] + crop_end[i] &lt;= block_shape[i] * input_shape[i + 1]`.\n\nThis operation is equivalent to the following steps:\n\n1. Reshape `input` to `reshaped` of shape:\n     [block_shape[0], ..., block_shape[M-1],\n      batch / prod(block_shape),\n      input_shape[1], ..., input_shape[N-1]]\n\n2. Permute dimensions of `reshaped` to produce `permuted` of shape\n     [batch / prod(block_shape),\n\n      input_shape[1], block_shape[0],\n      ...,\n      input_shape[M], block_shape[M-1],\n\n      input_shape[M+1], ..., input_shape[N-1]]\n\n3. Reshape `permuted` to produce `reshaped_permuted` of shape\n     [batch / prod(block_shape),\n\n      input_shape[1] * block_shape[0],\n      ...,\n      input_shape[M] * block_shape[M-1],\n\n      input_shape[M+1],\n      ...,\n      input_shape[N-1]]\n\n4. Crop the start and end of dimensions `[1, ..., M]` of\n   `reshaped_permuted` according to `crops` to produce the output of shape:\n     [batch / prod(block_shape),\n\n      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],\n      ...,\n      input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],\n\n      input_shape[M+1], ..., input_shape[N-1]]\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```prettyprint\nx = [[[1],   [2],  [3],  [4]],\n     [[5],   [6],  [7],  [8]],\n     [[9],  [10], [11],  [12]],\n     [[13], [14], [15],  [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [2, 0]]`:\n\n```prettyprint\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n     [[[0], [2], [4]]], [[[0], [10], [12]]],\n     [[[0], [5], [7]]], [[[0], [13], [15]]],\n     [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```"
<a name="line-7891"></a>  name: "crops"
<a name="line-7892"></a>  type_attr: "Tcrops"
<a name="line-7893"></a>}
<a name="line-7894"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-7895"></a>-}</span>
<a name="line-7896"></a>
<a name="line-7897"></a><a name="spaceToBatch"></a><span class='hs-comment'>-- | SpaceToBatch for 4-D tensors of type T.</span>
<a name="line-7898"></a><span class='hs-comment'>--</span>
<a name="line-7899"></a><span class='hs-comment'>-- This is a legacy version of the more general SpaceToBatchND.</span>
<a name="line-7900"></a><span class='hs-comment'>-- </span>
<a name="line-7901"></a><span class='hs-comment'>-- Zero-pads and then rearranges (permutes) blocks of spatial data into batch.</span>
<a name="line-7902"></a><span class='hs-comment'>-- More specifically, this op outputs a copy of the input tensor where values from</span>
<a name="line-7903"></a><span class='hs-comment'>-- the `height` and `width` dimensions are moved to the `batch` dimension. After</span>
<a name="line-7904"></a><span class='hs-comment'>-- the zero-padding, both `height` and `width` of the input must be divisible by the</span>
<a name="line-7905"></a><span class='hs-comment'>-- block size.</span>
<a name="line-7906"></a><span class='hs-definition'>spaceToBatch</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tpaddings</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>,</span>
<a name="line-7907"></a>                                            <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-7908"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-7909"></a>                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __block_size__</span>
<a name="line-7910"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D with shape `[batch, height, width, depth]`.</span>
<a name="line-7911"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tpaddings</span> <span class='hs-comment'>-- ^ __paddings__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies</span>
<a name="line-7912"></a>                                       <span class='hs-comment'>--   the padding of the input with zeros across the spatial dimensions as follows:</span>
<a name="line-7913"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7914"></a>                                       <span class='hs-comment'>--       paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]</span>
<a name="line-7915"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7916"></a>                                       <span class='hs-comment'>--   The effective spatial dimensions of the zero-padded input tensor will be:</span>
<a name="line-7917"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7918"></a>                                       <span class='hs-comment'>--       height_pad = pad_top + height + pad_bottom</span>
<a name="line-7919"></a>                                       <span class='hs-comment'>--       width_pad = pad_left + width + pad_right</span>
<a name="line-7920"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7921"></a>                                       <span class='hs-comment'>-- The attr `block_size` must be greater than one. It indicates the block size.</span>
<a name="line-7922"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7923"></a>                                       <span class='hs-comment'>--   * Non-overlapping blocks of size `block_size x block size` in the height and</span>
<a name="line-7924"></a>                                       <span class='hs-comment'>--     width dimensions are rearranged into the batch dimension at each location.</span>
<a name="line-7925"></a>                                       <span class='hs-comment'>--   * The batch of the output tensor is `batch * block_size * block_size`.</span>
<a name="line-7926"></a>                                       <span class='hs-comment'>--   * Both height_pad and width_pad must be divisible by block_size.</span>
<a name="line-7927"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7928"></a>                                       <span class='hs-comment'>-- The shape of the output will be:</span>
<a name="line-7929"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7930"></a>                                       <span class='hs-comment'>--     [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,</span>
<a name="line-7931"></a>                                       <span class='hs-comment'>--      depth]</span>
<a name="line-7932"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7933"></a>                                       <span class='hs-comment'>-- Some examples:</span>
<a name="line-7934"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7935"></a>                                       <span class='hs-comment'>-- (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:</span>
<a name="line-7936"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7937"></a>                                       <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7938"></a>                                       <span class='hs-comment'>-- x = [[[[1], [2]], [[3], [4]]]]</span>
<a name="line-7939"></a>                                       <span class='hs-comment'>-- ```</span>
<a name="line-7940"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7941"></a>                                       <span class='hs-comment'>-- The output tensor has shape `[4, 1, 1, 1]` and value:</span>
<a name="line-7942"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7943"></a>                                       <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7944"></a>                                       <span class='hs-comment'>-- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]</span>
<a name="line-7945"></a>                                       <span class='hs-comment'>-- ```</span>
<a name="line-7946"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7947"></a>                                       <span class='hs-comment'>-- (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:</span>
<a name="line-7948"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7949"></a>                                       <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7950"></a>                                       <span class='hs-comment'>-- x = [[[[1, 2, 3], [4, 5, 6]],</span>
<a name="line-7951"></a>                                       <span class='hs-comment'>--       [[7, 8, 9], [10, 11, 12]]]]</span>
<a name="line-7952"></a>                                       <span class='hs-comment'>-- ```</span>
<a name="line-7953"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7954"></a>                                       <span class='hs-comment'>-- The output tensor has shape `[4, 1, 1, 3]` and value:</span>
<a name="line-7955"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7956"></a>                                       <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7957"></a>                                       <span class='hs-comment'>-- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]</span>
<a name="line-7958"></a>                                       <span class='hs-comment'>-- ```</span>
<a name="line-7959"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7960"></a>                                       <span class='hs-comment'>-- (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:</span>
<a name="line-7961"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7962"></a>                                       <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7963"></a>                                       <span class='hs-comment'>-- x = [[[[1],   [2],  [3],  [4]],</span>
<a name="line-7964"></a>                                       <span class='hs-comment'>--       [[5],   [6],  [7],  [8]],</span>
<a name="line-7965"></a>                                       <span class='hs-comment'>--       [[9],  [10], [11],  [12]],</span>
<a name="line-7966"></a>                                       <span class='hs-comment'>--       [[13], [14], [15],  [16]]]]</span>
<a name="line-7967"></a>                                       <span class='hs-comment'>-- ```</span>
<a name="line-7968"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7969"></a>                                       <span class='hs-comment'>-- The output tensor has shape `[4, 2, 2, 1]` and value:</span>
<a name="line-7970"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7971"></a>                                       <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7972"></a>                                       <span class='hs-comment'>-- x = [[[[1], [3]], [[5], [7]]],</span>
<a name="line-7973"></a>                                       <span class='hs-comment'>--      [[[2], [4]], [[10], [12]]],</span>
<a name="line-7974"></a>                                       <span class='hs-comment'>--      [[[5], [7]], [[13], [15]]],</span>
<a name="line-7975"></a>                                       <span class='hs-comment'>--      [[[6], [8]], [[14], [16]]]]</span>
<a name="line-7976"></a>                                       <span class='hs-comment'>-- ```</span>
<a name="line-7977"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7978"></a>                                       <span class='hs-comment'>-- (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:</span>
<a name="line-7979"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7980"></a>                                       <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7981"></a>                                       <span class='hs-comment'>-- x = [[[[1],   [2],  [3],  [4]],</span>
<a name="line-7982"></a>                                       <span class='hs-comment'>--       [[5],   [6],  [7],  [8]]],</span>
<a name="line-7983"></a>                                       <span class='hs-comment'>--      [[[9],  [10], [11],  [12]],</span>
<a name="line-7984"></a>                                       <span class='hs-comment'>--       [[13], [14], [15],  [16]]]]</span>
<a name="line-7985"></a>                                       <span class='hs-comment'>-- ```</span>
<a name="line-7986"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7987"></a>                                       <span class='hs-comment'>-- The output tensor has shape `[8, 1, 2, 1]` and value:</span>
<a name="line-7988"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7989"></a>                                       <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-7990"></a>                                       <span class='hs-comment'>-- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],</span>
<a name="line-7991"></a>                                       <span class='hs-comment'>--      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]</span>
<a name="line-7992"></a>                                       <span class='hs-comment'>-- ```</span>
<a name="line-7993"></a>                                       <span class='hs-comment'>-- </span>
<a name="line-7994"></a>                                       <span class='hs-comment'>-- Among others, this operation is useful for reducing atrous convolution into</span>
<a name="line-7995"></a>                                       <span class='hs-comment'>-- regular convolution.</span>
<a name="line-7996"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-7997"></a><span class='hs-definition'>spaceToBatch</span> <span class='hs-varid'>block_size</span> <span class='hs-varid'>input</span> <span class='hs-varid'>paddings</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-7998"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SpaceToBatch"</span>
<a name="line-7999"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-8000"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tpaddings"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>)</span>
<a name="line-8001"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"block_size"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>block_size</span><span class='hs-layout'>)</span>
<a name="line-8002"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>paddings</span>
<a name="line-8003"></a><span class='hs-comment'>{-
<a name="line-8004"></a>attr { name: "T" type: "type" }
<a name="line-8005"></a>attr {
<a name="line-8006"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-8007"></a>  default_value { type: DT_INT32 }
<a name="line-8008"></a>  name: "Tpaddings"
<a name="line-8009"></a>  type: "type"
<a name="line-8010"></a>}
<a name="line-8011"></a>attr {
<a name="line-8012"></a>  has_minimum: true minimum: 2 name: "block_size" type: "int"
<a name="line-8013"></a>}
<a name="line-8014"></a>input_arg {
<a name="line-8015"></a>  description: "4-D with shape `[batch, height, width, depth]`."
<a name="line-8016"></a>  name: "input"
<a name="line-8017"></a>  type_attr: "T"
<a name="line-8018"></a>}
<a name="line-8019"></a>input_arg {
<a name="line-8020"></a>  description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\n  the padding of the input with zeros across the spatial dimensions as follows:\n\n      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]\n\n  The effective spatial dimensions of the zero-padded input tensor will be:\n\n      height_pad = pad_top + height + pad_bottom\n      width_pad = pad_left + width + pad_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\n  * Non-overlapping blocks of size `block_size x block size` in the height and\n    width dimensions are rearranged into the batch dimension at each location.\n  * The batch of the output tensor is `batch * block_size * block_size`.\n  * Both height_pad and width_pad must be divisible by block_size.\n\nThe shape of the output will be:\n\n    [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n     depth]\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]],\n      [[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution."
<a name="line-8021"></a>  name: "paddings"
<a name="line-8022"></a>  type_attr: "Tpaddings"
<a name="line-8023"></a>}
<a name="line-8024"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-8025"></a>-}</span>
<a name="line-8026"></a>
<a name="line-8027"></a><a name="adjustHue"></a><span class='hs-comment'>-- | Adjust the hue of one or more images.</span>
<a name="line-8028"></a><span class='hs-comment'>--</span>
<a name="line-8029"></a><span class='hs-comment'>-- `images` is a tensor of at least 3 dimensions.  The last dimension is</span>
<a name="line-8030"></a><span class='hs-comment'>-- interpretted as channels, and must be three.</span>
<a name="line-8031"></a><span class='hs-comment'>-- </span>
<a name="line-8032"></a><span class='hs-comment'>-- The input image is considered in the RGB colorspace. Conceptually, the RGB</span>
<a name="line-8033"></a><span class='hs-comment'>-- colors are first mapped into HSV. A delta is then applied all the hue values,</span>
<a name="line-8034"></a><span class='hs-comment'>-- and then remapped back to RGB colorspace.</span>
<a name="line-8035"></a><span class='hs-definition'>adjustHue</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __images__: Images to adjust.  At least 3-D.</span>
<a name="line-8036"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __delta__: A float delta to add to the hue.</span>
<a name="line-8037"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __output__: The hue-adjusted image or images.</span>
<a name="line-8038"></a><span class='hs-definition'>adjustHue</span> <span class='hs-varid'>images</span> <span class='hs-varid'>delta</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8039"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AdjustHue"</span><span class='hs-layout'>)</span>
<a name="line-8040"></a>        <span class='hs-varid'>images</span> <span class='hs-varid'>delta</span>
<a name="line-8041"></a><span class='hs-comment'>{-
<a name="line-8042"></a>input_arg {
<a name="line-8043"></a>  description: "Images to adjust.  At least 3-D."
<a name="line-8044"></a>  name: "images"
<a name="line-8045"></a>  type: DT_FLOAT
<a name="line-8046"></a>}
<a name="line-8047"></a>input_arg {
<a name="line-8048"></a>  description: "A float delta to add to the hue."
<a name="line-8049"></a>  name: "delta"
<a name="line-8050"></a>  type: DT_FLOAT
<a name="line-8051"></a>}
<a name="line-8052"></a>output_arg {
<a name="line-8053"></a>  description: "The hue-adjusted image or images."
<a name="line-8054"></a>  name: "output"
<a name="line-8055"></a>  type: DT_FLOAT
<a name="line-8056"></a>}
<a name="line-8057"></a>-}</span>
<a name="line-8058"></a>
<a name="line-8059"></a><a name="spaceToBatchND"></a><span class='hs-comment'>-- | SpaceToBatch for N-D tensors of type T.</span>
<a name="line-8060"></a><span class='hs-comment'>--</span>
<a name="line-8061"></a><span class='hs-comment'>-- This operation divides "spatial" dimensions `[1, ..., M]` of the input into a</span>
<a name="line-8062"></a><span class='hs-comment'>-- grid of blocks of shape `block_shape`, and interleaves these blocks with the</span>
<a name="line-8063"></a><span class='hs-comment'>-- "batch" dimension (0) such that in the output, the spatial dimensions</span>
<a name="line-8064"></a><span class='hs-comment'>-- `[1, ..., M]` correspond to the position within the grid, and the batch</span>
<a name="line-8065"></a><span class='hs-comment'>-- dimension combines both the position within a spatial block and the original</span>
<a name="line-8066"></a><span class='hs-comment'>-- batch position.  Prior to division into blocks, the spatial dimensions of the</span>
<a name="line-8067"></a><span class='hs-comment'>-- input are optionally zero padded according to `paddings`.  See below for a</span>
<a name="line-8068"></a><span class='hs-comment'>-- precise description.</span>
<a name="line-8069"></a><span class='hs-definition'>spaceToBatchND</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tblock_shape</span> <span class='hs-varid'>tpaddings</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-8070"></a>                                                              <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tblock_shape</span><span class='hs-layout'>,</span>
<a name="line-8071"></a>                                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-8072"></a>                                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tblock_shape</span><span class='hs-layout'>,</span>
<a name="line-8073"></a>                                                              <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>,</span>
<a name="line-8074"></a>                                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-8075"></a>                                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8076"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,</span>
<a name="line-8077"></a>                              <span class='hs-comment'>-- where spatial_shape has `M` dimensions.</span>
<a name="line-8078"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tblock_shape</span> <span class='hs-comment'>-- ^ __block_shape__: 1-D with shape `[M]`, all values must be &gt;= 1.</span>
<a name="line-8079"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tpaddings</span> <span class='hs-comment'>-- ^ __paddings__: 2-D with shape `[M, 2]`, all values must be &gt;= 0.</span>
<a name="line-8080"></a>                                         <span class='hs-comment'>--   `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension</span>
<a name="line-8081"></a>                                         <span class='hs-comment'>--   `i + 1`, which corresponds to spatial dimension `i`.  It is required that</span>
<a name="line-8082"></a>                                         <span class='hs-comment'>--   `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.</span>
<a name="line-8083"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8084"></a>                                         <span class='hs-comment'>-- This operation is equivalent to the following steps:</span>
<a name="line-8085"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8086"></a>                                         <span class='hs-comment'>-- 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the</span>
<a name="line-8087"></a>                                         <span class='hs-comment'>--    input according to `paddings` to produce `padded` of shape `padded_shape`.</span>
<a name="line-8088"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8089"></a>                                         <span class='hs-comment'>-- 2. Reshape `padded` to `reshaped_padded` of shape:</span>
<a name="line-8090"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8091"></a>                                         <span class='hs-comment'>--      [batch] +</span>
<a name="line-8092"></a>                                         <span class='hs-comment'>--      [padded_shape[1] / block_shape[0],</span>
<a name="line-8093"></a>                                         <span class='hs-comment'>--        block_shape[0],</span>
<a name="line-8094"></a>                                         <span class='hs-comment'>--       ...,</span>
<a name="line-8095"></a>                                         <span class='hs-comment'>--       padded_shape[M] / block_shape[M-1],</span>
<a name="line-8096"></a>                                         <span class='hs-comment'>--       block_shape[M-1]] +</span>
<a name="line-8097"></a>                                         <span class='hs-comment'>--      remaining_shape</span>
<a name="line-8098"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8099"></a>                                         <span class='hs-comment'>-- 3. Permute dimensions of `reshaped_padded` to produce</span>
<a name="line-8100"></a>                                         <span class='hs-comment'>--    `permuted_reshaped_padded` of shape:</span>
<a name="line-8101"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8102"></a>                                         <span class='hs-comment'>--      block_shape +</span>
<a name="line-8103"></a>                                         <span class='hs-comment'>--      [batch] +</span>
<a name="line-8104"></a>                                         <span class='hs-comment'>--      [padded_shape[1] / block_shape[0],</span>
<a name="line-8105"></a>                                         <span class='hs-comment'>--       ...,</span>
<a name="line-8106"></a>                                         <span class='hs-comment'>--       padded_shape[M] / block_shape[M-1]] +</span>
<a name="line-8107"></a>                                         <span class='hs-comment'>--      remaining_shape</span>
<a name="line-8108"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8109"></a>                                         <span class='hs-comment'>-- 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch</span>
<a name="line-8110"></a>                                         <span class='hs-comment'>--    dimension, producing an output tensor of shape:</span>
<a name="line-8111"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8112"></a>                                         <span class='hs-comment'>--      [batch * prod(block_shape)] +</span>
<a name="line-8113"></a>                                         <span class='hs-comment'>--      [padded_shape[1] / block_shape[0],</span>
<a name="line-8114"></a>                                         <span class='hs-comment'>--       ...,</span>
<a name="line-8115"></a>                                         <span class='hs-comment'>--       padded_shape[M] / block_shape[M-1]] +</span>
<a name="line-8116"></a>                                         <span class='hs-comment'>--      remaining_shape</span>
<a name="line-8117"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8118"></a>                                         <span class='hs-comment'>-- Some examples:</span>
<a name="line-8119"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8120"></a>                                         <span class='hs-comment'>-- (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and</span>
<a name="line-8121"></a>                                         <span class='hs-comment'>--     `paddings = [[0, 0], [0, 0]]`:</span>
<a name="line-8122"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8123"></a>                                         <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8124"></a>                                         <span class='hs-comment'>-- x = [[[[1], [2]], [[3], [4]]]]</span>
<a name="line-8125"></a>                                         <span class='hs-comment'>-- ```</span>
<a name="line-8126"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8127"></a>                                         <span class='hs-comment'>-- The output tensor has shape `[4, 1, 1, 1]` and value:</span>
<a name="line-8128"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8129"></a>                                         <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8130"></a>                                         <span class='hs-comment'>-- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]</span>
<a name="line-8131"></a>                                         <span class='hs-comment'>-- ```</span>
<a name="line-8132"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8133"></a>                                         <span class='hs-comment'>-- (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and</span>
<a name="line-8134"></a>                                         <span class='hs-comment'>--     `paddings = [[0, 0], [0, 0]]`:</span>
<a name="line-8135"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8136"></a>                                         <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8137"></a>                                         <span class='hs-comment'>-- x = [[[[1, 2, 3], [4, 5, 6]],</span>
<a name="line-8138"></a>                                         <span class='hs-comment'>--       [[7, 8, 9], [10, 11, 12]]]]</span>
<a name="line-8139"></a>                                         <span class='hs-comment'>-- ```</span>
<a name="line-8140"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8141"></a>                                         <span class='hs-comment'>-- The output tensor has shape `[4, 1, 1, 3]` and value:</span>
<a name="line-8142"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8143"></a>                                         <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8144"></a>                                         <span class='hs-comment'>-- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]</span>
<a name="line-8145"></a>                                         <span class='hs-comment'>-- ```</span>
<a name="line-8146"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8147"></a>                                         <span class='hs-comment'>-- (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and</span>
<a name="line-8148"></a>                                         <span class='hs-comment'>--     `paddings = [[0, 0], [0, 0]]`:</span>
<a name="line-8149"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8150"></a>                                         <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8151"></a>                                         <span class='hs-comment'>-- x = [[[[1],   [2],  [3],  [4]],</span>
<a name="line-8152"></a>                                         <span class='hs-comment'>--       [[5],   [6],  [7],  [8]],</span>
<a name="line-8153"></a>                                         <span class='hs-comment'>--       [[9],  [10], [11],  [12]],</span>
<a name="line-8154"></a>                                         <span class='hs-comment'>--       [[13], [14], [15],  [16]]]]</span>
<a name="line-8155"></a>                                         <span class='hs-comment'>-- ```</span>
<a name="line-8156"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8157"></a>                                         <span class='hs-comment'>-- The output tensor has shape `[4, 2, 2, 1]` and value:</span>
<a name="line-8158"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8159"></a>                                         <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8160"></a>                                         <span class='hs-comment'>-- x = [[[[1], [3]], [[5], [7]]],</span>
<a name="line-8161"></a>                                         <span class='hs-comment'>--      [[[2], [4]], [[10], [12]]],</span>
<a name="line-8162"></a>                                         <span class='hs-comment'>--      [[[5], [7]], [[13], [15]]],</span>
<a name="line-8163"></a>                                         <span class='hs-comment'>--      [[[6], [8]], [[14], [16]]]]</span>
<a name="line-8164"></a>                                         <span class='hs-comment'>-- ```</span>
<a name="line-8165"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8166"></a>                                         <span class='hs-comment'>-- (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and</span>
<a name="line-8167"></a>                                         <span class='hs-comment'>--     paddings = `[[0, 0], [2, 0]]`:</span>
<a name="line-8168"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8169"></a>                                         <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8170"></a>                                         <span class='hs-comment'>-- x = [[[[1],   [2],  [3],  [4]],</span>
<a name="line-8171"></a>                                         <span class='hs-comment'>--       [[5],   [6],  [7],  [8]]],</span>
<a name="line-8172"></a>                                         <span class='hs-comment'>--      [[[9],  [10], [11],  [12]],</span>
<a name="line-8173"></a>                                         <span class='hs-comment'>--       [[13], [14], [15],  [16]]]]</span>
<a name="line-8174"></a>                                         <span class='hs-comment'>-- ```</span>
<a name="line-8175"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8176"></a>                                         <span class='hs-comment'>-- The output tensor has shape `[8, 1, 3, 1]` and value:</span>
<a name="line-8177"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8178"></a>                                         <span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8179"></a>                                         <span class='hs-comment'>-- x = [[[[0], [1], [3]]], [[[0], [9], [11]]],</span>
<a name="line-8180"></a>                                         <span class='hs-comment'>--      [[[0], [2], [4]]], [[[0], [10], [12]]],</span>
<a name="line-8181"></a>                                         <span class='hs-comment'>--      [[[0], [5], [7]]], [[[0], [13], [15]]],</span>
<a name="line-8182"></a>                                         <span class='hs-comment'>--      [[[0], [6], [8]]], [[[0], [14], [16]]]]</span>
<a name="line-8183"></a>                                         <span class='hs-comment'>-- ```</span>
<a name="line-8184"></a>                                         <span class='hs-comment'>-- </span>
<a name="line-8185"></a>                                         <span class='hs-comment'>-- Among others, this operation is useful for reducing atrous convolution into</span>
<a name="line-8186"></a>                                         <span class='hs-comment'>-- regular convolution.</span>
<a name="line-8187"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-8188"></a><span class='hs-definition'>spaceToBatchND</span> <span class='hs-varid'>input</span> <span class='hs-varid'>block_shape</span> <span class='hs-varid'>paddings</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8189"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SpaceToBatchND"</span>
<a name="line-8190"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-8191"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tblock_shape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tblock_shape</span><span class='hs-layout'>)</span>
<a name="line-8192"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tpaddings"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8193"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>block_shape</span> <span class='hs-varid'>paddings</span>
<a name="line-8194"></a><span class='hs-comment'>{-
<a name="line-8195"></a>attr { name: "T" type: "type" }
<a name="line-8196"></a>attr {
<a name="line-8197"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-8198"></a>  default_value { type: DT_INT32 }
<a name="line-8199"></a>  name: "Tblock_shape"
<a name="line-8200"></a>  type: "type"
<a name="line-8201"></a>}
<a name="line-8202"></a>attr {
<a name="line-8203"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-8204"></a>  default_value { type: DT_INT32 }
<a name="line-8205"></a>  name: "Tpaddings"
<a name="line-8206"></a>  type: "type"
<a name="line-8207"></a>}
<a name="line-8208"></a>input_arg {
<a name="line-8209"></a>  description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has `M` dimensions."
<a name="line-8210"></a>  name: "input"
<a name="line-8211"></a>  type_attr: "T"
<a name="line-8212"></a>}
<a name="line-8213"></a>input_arg {
<a name="line-8214"></a>  description: "1-D with shape `[M]`, all values must be &gt;= 1."
<a name="line-8215"></a>  name: "block_shape"
<a name="line-8216"></a>  type_attr: "Tblock_shape"
<a name="line-8217"></a>}
<a name="line-8218"></a>input_arg {
<a name="line-8219"></a>  description: "2-D with shape `[M, 2]`, all values must be &gt;= 0.\n  `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n  `i + 1`, which corresponds to spatial dimension `i`.  It is required that\n  `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n\nThis operation is equivalent to the following steps:\n\n1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\n   input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n2. Reshape `padded` to `reshaped_padded` of shape:\n\n     [batch] +\n     [padded_shape[1] / block_shape[0],\n       block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1],\n      block_shape[M-1]] +\n     remaining_shape\n\n3. Permute dimensions of `reshaped_padded` to produce\n   `permuted_reshaped_padded` of shape:\n\n     block_shape +\n     [batch] +\n     [padded_shape[1] / block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1]] +\n     remaining_shape\n\n4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n   dimension, producing an output tensor of shape:\n\n     [batch * prod(block_shape)] +\n     [padded_shape[1] / block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1]] +\n     remaining_shape\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]],\n      [[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n    paddings = `[[0, 0], [2, 0]]`:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 3, 1]` and value:\n\n```prettyprint\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n     [[[0], [2], [4]]], [[[0], [10], [12]]],\n     [[[0], [5], [7]]], [[[0], [13], [15]]],\n     [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution."
<a name="line-8220"></a>  name: "paddings"
<a name="line-8221"></a>  type_attr: "Tpaddings"
<a name="line-8222"></a>}
<a name="line-8223"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-8224"></a>-}</span>
<a name="line-8225"></a>
<a name="line-8226"></a><a name="diagPart"></a><span class='hs-comment'>-- | Returns the diagonal part of the tensor.</span>
<a name="line-8227"></a><span class='hs-comment'>--</span>
<a name="line-8228"></a><span class='hs-comment'>-- This operation returns a tensor with the `diagonal` part</span>
<a name="line-8229"></a><span class='hs-comment'>-- of the `input`. The `diagonal` part is computed as follows:</span>
<a name="line-8230"></a><span class='hs-comment'>-- </span>
<a name="line-8231"></a><span class='hs-comment'>-- Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a</span>
<a name="line-8232"></a><span class='hs-comment'>-- tensor of rank `k` with dimensions `[D1,..., Dk]` where:</span>
<a name="line-8233"></a><span class='hs-comment'>-- </span>
<a name="line-8234"></a><span class='hs-comment'>-- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.</span>
<a name="line-8235"></a><span class='hs-comment'>-- </span>
<a name="line-8236"></a><span class='hs-comment'>-- For example:</span>
<a name="line-8237"></a><span class='hs-comment'>-- </span>
<a name="line-8238"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8239"></a><span class='hs-comment'>-- # 'input' is [[1, 0, 0, 0]</span>
<a name="line-8240"></a><span class='hs-comment'>--               [0, 2, 0, 0]</span>
<a name="line-8241"></a><span class='hs-comment'>--               [0, 0, 3, 0]</span>
<a name="line-8242"></a><span class='hs-comment'>--               [0, 0, 0, 4]]</span>
<a name="line-8243"></a><span class='hs-comment'>-- </span>
<a name="line-8244"></a><span class='hs-comment'>-- tf.diag_part(input) ==&gt; [1, 2, 3, 4]</span>
<a name="line-8245"></a><span class='hs-comment'>-- ```</span>
<a name="line-8246"></a><span class='hs-definition'>diagPart</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-8247"></a>                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-8248"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-8249"></a>                                                 <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8250"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Rank k tensor where k is 2, 4, or 6.</span>
<a name="line-8251"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __diagonal__: The extracted diagonal.</span>
<a name="line-8252"></a><span class='hs-definition'>diagPart</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8253"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DiagPart"</span>
<a name="line-8254"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8255"></a>        <span class='hs-varid'>input</span>
<a name="line-8256"></a><span class='hs-comment'>{-
<a name="line-8257"></a>attr {
<a name="line-8258"></a>  allowed_values {
<a name="line-8259"></a>    list {
<a name="line-8260"></a>      type: DT_FLOAT
<a name="line-8261"></a>      type: DT_DOUBLE
<a name="line-8262"></a>      type: DT_INT32
<a name="line-8263"></a>      type: DT_INT64
<a name="line-8264"></a>      type: DT_COMPLEX64
<a name="line-8265"></a>      type: DT_COMPLEX128
<a name="line-8266"></a>    }
<a name="line-8267"></a>  }
<a name="line-8268"></a>  name: "T"
<a name="line-8269"></a>  type: "type"
<a name="line-8270"></a>}
<a name="line-8271"></a>input_arg {
<a name="line-8272"></a>  description: "Rank k tensor where k is 2, 4, or 6."
<a name="line-8273"></a>  name: "input"
<a name="line-8274"></a>  type_attr: "T"
<a name="line-8275"></a>}
<a name="line-8276"></a>output_arg {
<a name="line-8277"></a>  description: "The extracted diagonal."
<a name="line-8278"></a>  name: "diagonal"
<a name="line-8279"></a>  type_attr: "T"
<a name="line-8280"></a>}
<a name="line-8281"></a>-}</span>
<a name="line-8282"></a>
<a name="line-8283"></a><a name="placeholderV2"></a><span class='hs-comment'>-- | A placeholder op for a value that will be fed into the computation.</span>
<a name="line-8284"></a><span class='hs-comment'>--</span>
<a name="line-8285"></a><span class='hs-comment'>-- N.B. This operation will fail with an error if it is executed. It is</span>
<a name="line-8286"></a><span class='hs-comment'>-- intended as a way to represent a value that will always be fed, and to</span>
<a name="line-8287"></a><span class='hs-comment'>-- provide attrs that enable the fed value to be checked at runtime.</span>
<a name="line-8288"></a><span class='hs-definition'>placeholderV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8289"></a>                 <span class='hs-conid'>Shape</span> <span class='hs-comment'>-- ^ __shape__: The shape of the tensor. The shape can be any partially-specified</span>
<a name="line-8290"></a>                       <span class='hs-comment'>-- shape.  To be unconstrained, pass in a shape with unknown rank.</span>
<a name="line-8291"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism.</span>
<a name="line-8292"></a><span class='hs-definition'>placeholderV2</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8293"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"PlaceholderV2"</span>
<a name="line-8294"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-8295"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"shape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>shape</span><span class='hs-layout'>)</span>
<a name="line-8296"></a>        
<a name="line-8297"></a><span class='hs-comment'>{-
<a name="line-8298"></a>attr {
<a name="line-8299"></a>  description: "The type of elements in the tensor."
<a name="line-8300"></a>  name: "dtype"
<a name="line-8301"></a>  type: "type"
<a name="line-8302"></a>}
<a name="line-8303"></a>attr {
<a name="line-8304"></a>  description: "The shape of the tensor. The shape can be any partially-specified\nshape.  To be unconstrained, pass in a shape with unknown rank."
<a name="line-8305"></a>  name: "shape"
<a name="line-8306"></a>  type: "shape"
<a name="line-8307"></a>}
<a name="line-8308"></a>output_arg {
<a name="line-8309"></a>  description: "A placeholder tensor that must be replaced using the feed mechanism."
<a name="line-8310"></a>  name: "output"
<a name="line-8311"></a>  type_attr: "dtype"
<a name="line-8312"></a>}
<a name="line-8313"></a>-}</span>
<a name="line-8314"></a>
<a name="line-8315"></a><span class='hs-comment'>-- | Computes acos of x element-wise.</span>
<a name="line-8316"></a>
<a name="line-8317"></a><a name="acos"></a><span class='hs-definition'>acos</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-8318"></a>                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-8319"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-8320"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-8321"></a>                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-8322"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-8323"></a><span class='hs-definition'>acos</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8324"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Acos"</span>
<a name="line-8325"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8326"></a>        <span class='hs-varid'>x</span>
<a name="line-8327"></a><span class='hs-comment'>{-
<a name="line-8328"></a>attr {
<a name="line-8329"></a>  allowed_values {
<a name="line-8330"></a>    list {
<a name="line-8331"></a>      type: DT_HALF
<a name="line-8332"></a>      type: DT_FLOAT
<a name="line-8333"></a>      type: DT_DOUBLE
<a name="line-8334"></a>      type: DT_INT32
<a name="line-8335"></a>      type: DT_INT64
<a name="line-8336"></a>      type: DT_COMPLEX64
<a name="line-8337"></a>      type: DT_COMPLEX128
<a name="line-8338"></a>    }
<a name="line-8339"></a>  }
<a name="line-8340"></a>  name: "T"
<a name="line-8341"></a>  type: "type"
<a name="line-8342"></a>}
<a name="line-8343"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-8344"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-8345"></a>-}</span>
<a name="line-8346"></a>
<a name="line-8347"></a><a name="placeholder"></a><span class='hs-comment'>-- | A placeholder op for a value that will be fed into the computation.</span>
<a name="line-8348"></a><span class='hs-comment'>--</span>
<a name="line-8349"></a><span class='hs-comment'>-- N.B. This operation will fail with an error if it is executed. It is</span>
<a name="line-8350"></a><span class='hs-comment'>-- intended as a way to represent a value that will always be fed, and to</span>
<a name="line-8351"></a><span class='hs-comment'>-- provide attrs that enable the fed value to be checked at runtime.</span>
<a name="line-8352"></a><span class='hs-definition'>placeholder</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8353"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism.</span>
<a name="line-8354"></a><span class='hs-definition'>placeholder</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8355"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Placeholder"</span>
<a name="line-8356"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8357"></a>        
<a name="line-8358"></a><span class='hs-comment'>{-
<a name="line-8359"></a>attr {
<a name="line-8360"></a>  description: "The type of elements in the tensor."
<a name="line-8361"></a>  name: "dtype"
<a name="line-8362"></a>  type: "type"
<a name="line-8363"></a>}
<a name="line-8364"></a>attr {
<a name="line-8365"></a>  default_value { shape { } }
<a name="line-8366"></a>  description: "(Optional) The shape of the tensor. If the shape has 0 dimensions, the\nshape is unconstrained."
<a name="line-8367"></a>  name: "shape"
<a name="line-8368"></a>  type: "shape"
<a name="line-8369"></a>}
<a name="line-8370"></a>output_arg {
<a name="line-8371"></a>  description: "A placeholder tensor that must be replaced using the feed mechanism."
<a name="line-8372"></a>  name: "output"
<a name="line-8373"></a>  type_attr: "dtype"
<a name="line-8374"></a>}
<a name="line-8375"></a>-}</span>
<a name="line-8376"></a>
<a name="line-8377"></a><a name="controlTrigger"></a><span class='hs-comment'>-- | Does nothing. Serves as a control trigger for scheduling.</span>
<a name="line-8378"></a><span class='hs-comment'>--</span>
<a name="line-8379"></a><span class='hs-comment'>-- Only useful as a placeholder for control edges.</span>
<a name="line-8380"></a><span class='hs-definition'>controlTrigger</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>ControlNode</span>
<a name="line-8381"></a><span class='hs-definition'>controlTrigger</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8382"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ControlTrigger"</span><span class='hs-layout'>)</span>
<a name="line-8383"></a>        
<a name="line-8384"></a><span class='hs-comment'>{-
<a name="line-8385"></a>
<a name="line-8386"></a>-}</span>
<a name="line-8387"></a>
<a name="line-8388"></a><span class='hs-comment'>-- | Computes atan of x element-wise.</span>
<a name="line-8389"></a>
<a name="line-8390"></a><a name="atan"></a><span class='hs-definition'>atan</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-8391"></a>                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-8392"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-8393"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-8394"></a>                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-8395"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-8396"></a><span class='hs-definition'>atan</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8397"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Atan"</span>
<a name="line-8398"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8399"></a>        <span class='hs-varid'>x</span>
<a name="line-8400"></a><span class='hs-comment'>{-
<a name="line-8401"></a>attr {
<a name="line-8402"></a>  allowed_values {
<a name="line-8403"></a>    list {
<a name="line-8404"></a>      type: DT_HALF
<a name="line-8405"></a>      type: DT_FLOAT
<a name="line-8406"></a>      type: DT_DOUBLE
<a name="line-8407"></a>      type: DT_INT32
<a name="line-8408"></a>      type: DT_INT64
<a name="line-8409"></a>      type: DT_COMPLEX64
<a name="line-8410"></a>      type: DT_COMPLEX128
<a name="line-8411"></a>    }
<a name="line-8412"></a>  }
<a name="line-8413"></a>  name: "T"
<a name="line-8414"></a>  type: "type"
<a name="line-8415"></a>}
<a name="line-8416"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-8417"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-8418"></a>-}</span>
<a name="line-8419"></a>
<a name="line-8420"></a><a name="mirrorPad"></a><span class='hs-comment'>-- | Pads a tensor with mirrored values.</span>
<a name="line-8421"></a><span class='hs-comment'>--</span>
<a name="line-8422"></a><span class='hs-comment'>-- This operation pads a `input` with mirrored values according to the `paddings`</span>
<a name="line-8423"></a><span class='hs-comment'>-- you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is</span>
<a name="line-8424"></a><span class='hs-comment'>-- the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates</span>
<a name="line-8425"></a><span class='hs-comment'>-- how many values to add before the contents of `input` in that dimension, and</span>
<a name="line-8426"></a><span class='hs-comment'>-- `paddings[D, 1]` indicates how many values to add after the contents of `input`</span>
<a name="line-8427"></a><span class='hs-comment'>-- in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater</span>
<a name="line-8428"></a><span class='hs-comment'>-- than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true</span>
<a name="line-8429"></a><span class='hs-comment'>-- (if false, respectively).</span>
<a name="line-8430"></a><span class='hs-comment'>-- </span>
<a name="line-8431"></a><span class='hs-comment'>-- The padded size of each dimension D of the output is:</span>
<a name="line-8432"></a><span class='hs-comment'>-- </span>
<a name="line-8433"></a><span class='hs-comment'>-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`</span>
<a name="line-8434"></a><span class='hs-comment'>-- </span>
<a name="line-8435"></a><span class='hs-comment'>-- For example:</span>
<a name="line-8436"></a><span class='hs-comment'>-- </span>
<a name="line-8437"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8438"></a><span class='hs-comment'>-- # 't' is [[1, 2, 3], [4, 5, 6]].</span>
<a name="line-8439"></a><span class='hs-comment'>-- # 'paddings' is [[1, 1]], [2, 2]].</span>
<a name="line-8440"></a><span class='hs-comment'>-- # 'mode' is SYMMETRIC.</span>
<a name="line-8441"></a><span class='hs-comment'>-- # rank of 't' is 2.</span>
<a name="line-8442"></a><span class='hs-comment'>-- pad(t, paddings) ==&gt; [[2, 1, 1, 2, 3, 3, 2]</span>
<a name="line-8443"></a><span class='hs-comment'>--                       [2, 1, 1, 2, 3, 3, 2]</span>
<a name="line-8444"></a><span class='hs-comment'>--                       [5, 4, 4, 5, 6, 6, 5]</span>
<a name="line-8445"></a><span class='hs-comment'>--                       [5, 4, 4, 5, 6, 6, 5]]</span>
<a name="line-8446"></a><span class='hs-comment'>-- ```</span>
<a name="line-8447"></a><span class='hs-definition'>mirrorPad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tpaddings</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>,</span>
<a name="line-8448"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-8449"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8450"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The input tensor to be padded.</span>
<a name="line-8451"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tpaddings</span> <span class='hs-comment'>-- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of</span>
<a name="line-8452"></a>                                    <span class='hs-comment'>-- rows must be the same as the rank of `input`.</span>
<a name="line-8453"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The padded tensor.</span>
<a name="line-8454"></a><span class='hs-definition'>mirrorPad</span> <span class='hs-varid'>input</span> <span class='hs-varid'>paddings</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8455"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MirrorPad"</span>
<a name="line-8456"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-8457"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tpaddings"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8458"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>paddings</span>
<a name="line-8459"></a><span class='hs-comment'>{-
<a name="line-8460"></a>attr { name: "T" type: "type" }
<a name="line-8461"></a>attr {
<a name="line-8462"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-8463"></a>  default_value { type: DT_INT32 }
<a name="line-8464"></a>  name: "Tpaddings"
<a name="line-8465"></a>  type: "type"
<a name="line-8466"></a>}
<a name="line-8467"></a>attr {
<a name="line-8468"></a>  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
<a name="line-8469"></a>  description: "Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions\ndo not include the borders, while in symmetric mode the padded regions\ndo include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`\nis `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and\nit is `[1, 2, 3, 3, 2]` in symmetric mode."
<a name="line-8470"></a>  name: "mode"
<a name="line-8471"></a>  type: "string"
<a name="line-8472"></a>}
<a name="line-8473"></a>input_arg {
<a name="line-8474"></a>  description: "The input tensor to be padded."
<a name="line-8475"></a>  name: "input"
<a name="line-8476"></a>  type_attr: "T"
<a name="line-8477"></a>}
<a name="line-8478"></a>input_arg {
<a name="line-8479"></a>  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
<a name="line-8480"></a>  name: "paddings"
<a name="line-8481"></a>  type_attr: "Tpaddings"
<a name="line-8482"></a>}
<a name="line-8483"></a>output_arg {
<a name="line-8484"></a>  description: "The padded tensor." name: "output" type_attr: "T"
<a name="line-8485"></a>}
<a name="line-8486"></a>-}</span>
<a name="line-8487"></a>
<a name="line-8488"></a><a name="where'"></a><span class='hs-comment'>-- | Returns locations of true values in a boolean tensor.</span>
<a name="line-8489"></a><span class='hs-comment'>--</span>
<a name="line-8490"></a><span class='hs-comment'>-- This operation returns the coordinates of true elements in `input`. The</span>
<a name="line-8491"></a><span class='hs-comment'>-- coordinates are returned in a 2-D tensor where the first dimension (rows)</span>
<a name="line-8492"></a><span class='hs-comment'>-- represents the number of true elements, and the second dimension (columns)</span>
<a name="line-8493"></a><span class='hs-comment'>-- represents the coordinates of the true elements. Keep in mind, the shape of</span>
<a name="line-8494"></a><span class='hs-comment'>-- the output tensor can vary depending on how many true values there are in</span>
<a name="line-8495"></a><span class='hs-comment'>-- `input`. Indices are output in row-major order.</span>
<a name="line-8496"></a><span class='hs-comment'>-- </span>
<a name="line-8497"></a><span class='hs-comment'>-- For example:</span>
<a name="line-8498"></a><span class='hs-comment'>-- </span>
<a name="line-8499"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8500"></a><span class='hs-comment'>-- # 'input' tensor is [[True, False]</span>
<a name="line-8501"></a><span class='hs-comment'>-- #                    [True, False]]</span>
<a name="line-8502"></a><span class='hs-comment'>-- # 'input' has two true values, so output has two coordinates.</span>
<a name="line-8503"></a><span class='hs-comment'>-- # 'input' has rank of 2, so coordinates have two indices.</span>
<a name="line-8504"></a><span class='hs-comment'>-- where(input) ==&gt; [[0, 0],</span>
<a name="line-8505"></a><span class='hs-comment'>--                   [1, 0]]</span>
<a name="line-8506"></a><span class='hs-comment'>-- </span>
<a name="line-8507"></a><span class='hs-comment'>-- # `input` tensor is [[[True, False]</span>
<a name="line-8508"></a><span class='hs-comment'>-- #                     [True, False]]</span>
<a name="line-8509"></a><span class='hs-comment'>-- #                    [[False, True]</span>
<a name="line-8510"></a><span class='hs-comment'>-- #                     [False, True]]</span>
<a name="line-8511"></a><span class='hs-comment'>-- #                    [[False, False]</span>
<a name="line-8512"></a><span class='hs-comment'>-- #                     [False, True]]]</span>
<a name="line-8513"></a><span class='hs-comment'>-- # 'input' has 5 true values, so output has 5 coordinates.</span>
<a name="line-8514"></a><span class='hs-comment'>-- # 'input' has rank of 3, so coordinates have three indices.</span>
<a name="line-8515"></a><span class='hs-comment'>-- where(input) ==&gt; [[0, 0, 0],</span>
<a name="line-8516"></a><span class='hs-comment'>--                   [0, 1, 0],</span>
<a name="line-8517"></a><span class='hs-comment'>--                   [1, 0, 1],</span>
<a name="line-8518"></a><span class='hs-comment'>--                   [1, 1, 1],</span>
<a name="line-8519"></a><span class='hs-comment'>--                   [2, 1, 1]]</span>
<a name="line-8520"></a><span class='hs-comment'>-- ```</span>
<a name="line-8521"></a><span class='hs-definition'>where'</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-8522"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __index__</span>
<a name="line-8523"></a><span class='hs-definition'>where'</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8524"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Where"</span><span class='hs-layout'>)</span>
<a name="line-8525"></a>        <span class='hs-varid'>input</span>
<a name="line-8526"></a><span class='hs-comment'>{-
<a name="line-8527"></a>input_arg { name: "input" type: DT_BOOL }
<a name="line-8528"></a>output_arg { name: "index" type: DT_INT64 }
<a name="line-8529"></a>-}</span>
<a name="line-8530"></a>
<a name="line-8531"></a><span class='hs-comment'>-- | Computes gradients of average pooling function.</span>
<a name="line-8532"></a>
<a name="line-8533"></a><a name="avgPool3DGrad"></a><span class='hs-definition'>avgPool3DGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-8534"></a>                                   <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-8535"></a>                                           <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-8536"></a>                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-8537"></a>                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-8538"></a>                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-8539"></a>                                           <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8540"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __orig_input_shape__: The original input dimensions.</span>
<a name="line-8541"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.</span>
<a name="line-8542"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The backprop for input.</span>
<a name="line-8543"></a><span class='hs-definition'>avgPool3DGrad</span> <span class='hs-varid'>orig_input_shape</span> <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8544"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AvgPool3DGrad"</span>
<a name="line-8545"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8546"></a>        <span class='hs-varid'>orig_input_shape</span> <span class='hs-varid'>grad</span>
<a name="line-8547"></a><span class='hs-comment'>{-
<a name="line-8548"></a>attr {
<a name="line-8549"></a>  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
<a name="line-8550"></a>  has_minimum: true
<a name="line-8551"></a>  minimum: 5
<a name="line-8552"></a>  name: "ksize"
<a name="line-8553"></a>  type: "list(int)"
<a name="line-8554"></a>}
<a name="line-8555"></a>attr {
<a name="line-8556"></a>  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
<a name="line-8557"></a>  has_minimum: true
<a name="line-8558"></a>  minimum: 5
<a name="line-8559"></a>  name: "strides"
<a name="line-8560"></a>  type: "list(int)"
<a name="line-8561"></a>}
<a name="line-8562"></a>attr {
<a name="line-8563"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-8564"></a>  description: "The type of padding algorithm to use."
<a name="line-8565"></a>  name: "padding"
<a name="line-8566"></a>  type: "string"
<a name="line-8567"></a>}
<a name="line-8568"></a>attr {
<a name="line-8569"></a>  allowed_values {
<a name="line-8570"></a>    list {
<a name="line-8571"></a>      type: DT_FLOAT
<a name="line-8572"></a>      type: DT_DOUBLE
<a name="line-8573"></a>      type: DT_INT64
<a name="line-8574"></a>      type: DT_INT32
<a name="line-8575"></a>      type: DT_UINT8
<a name="line-8576"></a>      type: DT_UINT16
<a name="line-8577"></a>      type: DT_INT16
<a name="line-8578"></a>      type: DT_INT8
<a name="line-8579"></a>      type: DT_COMPLEX64
<a name="line-8580"></a>      type: DT_COMPLEX128
<a name="line-8581"></a>      type: DT_QINT8
<a name="line-8582"></a>      type: DT_QUINT8
<a name="line-8583"></a>      type: DT_QINT32
<a name="line-8584"></a>      type: DT_HALF
<a name="line-8585"></a>    }
<a name="line-8586"></a>  }
<a name="line-8587"></a>  name: "T"
<a name="line-8588"></a>  type: "type"
<a name="line-8589"></a>}
<a name="line-8590"></a>input_arg {
<a name="line-8591"></a>  description: "The original input dimensions."
<a name="line-8592"></a>  name: "orig_input_shape"
<a name="line-8593"></a>  type: DT_INT32
<a name="line-8594"></a>}
<a name="line-8595"></a>input_arg {
<a name="line-8596"></a>  description: "Output backprop of shape `[batch, depth, rows, cols, channels]`."
<a name="line-8597"></a>  name: "grad"
<a name="line-8598"></a>  type_attr: "T"
<a name="line-8599"></a>}
<a name="line-8600"></a>output_arg {
<a name="line-8601"></a>  description: "The backprop for input."
<a name="line-8602"></a>  name: "output"
<a name="line-8603"></a>  type_attr: "T"
<a name="line-8604"></a>}
<a name="line-8605"></a>-}</span>
<a name="line-8606"></a>
<a name="line-8607"></a><span class='hs-comment'>-- | Restore a Reader to its initial clean state.</span>
<a name="line-8608"></a>
<a name="line-8609"></a><a name="readerReset"></a><span class='hs-definition'>readerReset</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __reader_handle__: Handle to a Reader.</span>
<a name="line-8610"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-8611"></a><span class='hs-definition'>readerReset</span> <span class='hs-varid'>reader_handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8612"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReaderReset"</span><span class='hs-layout'>)</span>
<a name="line-8613"></a>        <span class='hs-varid'>reader_handle</span>
<a name="line-8614"></a><span class='hs-comment'>{-
<a name="line-8615"></a>input_arg {
<a name="line-8616"></a>  description: "Handle to a Reader."
<a name="line-8617"></a>  is_ref: true
<a name="line-8618"></a>  name: "reader_handle"
<a name="line-8619"></a>  type: DT_STRING
<a name="line-8620"></a>}
<a name="line-8621"></a>-}</span>
<a name="line-8622"></a>
<a name="line-8623"></a><a name="tileGrad"></a><span class='hs-comment'>-- | Returns the gradient of `Tile`.</span>
<a name="line-8624"></a><span class='hs-comment'>--</span>
<a name="line-8625"></a><span class='hs-comment'>-- Since `Tile` takes an input and repeats the input `multiples` times</span>
<a name="line-8626"></a><span class='hs-comment'>-- along each dimension, `TileGrad` takes in `multiples` and aggregates</span>
<a name="line-8627"></a><span class='hs-comment'>-- each repeated tile of `input` into `output`.</span>
<a name="line-8628"></a><span class='hs-definition'>tileGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-8629"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __multiples__</span>
<a name="line-8630"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-8631"></a><span class='hs-definition'>tileGrad</span> <span class='hs-varid'>input</span> <span class='hs-varid'>multiples</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8632"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TileGrad"</span>
<a name="line-8633"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8634"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>multiples</span>
<a name="line-8635"></a><span class='hs-comment'>{-
<a name="line-8636"></a>attr { name: "T" type: "type" }
<a name="line-8637"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-8638"></a>input_arg { name: "multiples" type: DT_INT32 }
<a name="line-8639"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-8640"></a>-}</span>
<a name="line-8641"></a>
<a name="line-8642"></a><a name="expandDims"></a><span class='hs-comment'>-- | Inserts a dimension of 1 into a tensor's shape.</span>
<a name="line-8643"></a><span class='hs-comment'>--</span>
<a name="line-8644"></a><span class='hs-comment'>-- Given a tensor `input`, this operation inserts a dimension of 1 at the</span>
<a name="line-8645"></a><span class='hs-comment'>-- dimension index `dim` of `input`'s shape. The dimension index `dim` starts at</span>
<a name="line-8646"></a><span class='hs-comment'>-- zero; if you specify a negative number for `dim` it is counted backward from</span>
<a name="line-8647"></a><span class='hs-comment'>-- the end.</span>
<a name="line-8648"></a><span class='hs-comment'>-- </span>
<a name="line-8649"></a><span class='hs-comment'>-- This operation is useful if you want to add a batch dimension to a single</span>
<a name="line-8650"></a><span class='hs-comment'>-- element. For example, if you have a single image of shape `[height, width,</span>
<a name="line-8651"></a><span class='hs-comment'>-- channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,</span>
<a name="line-8652"></a><span class='hs-comment'>-- which will make the shape `[1, height, width, channels]`.</span>
<a name="line-8653"></a><span class='hs-comment'>-- </span>
<a name="line-8654"></a><span class='hs-comment'>-- Other examples:</span>
<a name="line-8655"></a><span class='hs-comment'>-- </span>
<a name="line-8656"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8657"></a><span class='hs-comment'>-- # 't' is a tensor of shape [2]</span>
<a name="line-8658"></a><span class='hs-comment'>-- shape(expand_dims(t, 0)) ==&gt; [1, 2]</span>
<a name="line-8659"></a><span class='hs-comment'>-- shape(expand_dims(t, 1)) ==&gt; [2, 1]</span>
<a name="line-8660"></a><span class='hs-comment'>-- shape(expand_dims(t, -1)) ==&gt; [2, 1]</span>
<a name="line-8661"></a><span class='hs-comment'>-- </span>
<a name="line-8662"></a><span class='hs-comment'>-- # 't2' is a tensor of shape [2, 3, 5]</span>
<a name="line-8663"></a><span class='hs-comment'>-- shape(expand_dims(t2, 0)) ==&gt; [1, 2, 3, 5]</span>
<a name="line-8664"></a><span class='hs-comment'>-- shape(expand_dims(t2, 2)) ==&gt; [2, 3, 1, 5]</span>
<a name="line-8665"></a><span class='hs-comment'>-- shape(expand_dims(t2, 3)) ==&gt; [2, 3, 5, 1]</span>
<a name="line-8666"></a><span class='hs-comment'>-- ```</span>
<a name="line-8667"></a><span class='hs-comment'>-- </span>
<a name="line-8668"></a><span class='hs-comment'>-- This operation requires that:</span>
<a name="line-8669"></a><span class='hs-comment'>-- </span>
<a name="line-8670"></a><span class='hs-comment'>-- `-1-input.dims() &lt;= dim &lt;= input.dims()`</span>
<a name="line-8671"></a><span class='hs-comment'>-- </span>
<a name="line-8672"></a><span class='hs-comment'>-- This operation is related to `squeeze()`, which removes dimensions of</span>
<a name="line-8673"></a><span class='hs-comment'>-- size 1.</span>
<a name="line-8674"></a><span class='hs-definition'>expandDims</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tdim</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tdim</span><span class='hs-layout'>,</span>
<a name="line-8675"></a>                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-8676"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tdim</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8677"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-8678"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tdim</span> <span class='hs-comment'>-- ^ __dim__: 0-D (scalar). Specifies the dimension index at which to</span>
<a name="line-8679"></a>                                <span class='hs-comment'>-- expand the shape of `input`.</span>
<a name="line-8680"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Contains the same data as `input`, but its shape has an additional</span>
<a name="line-8681"></a>              <span class='hs-comment'>-- dimension of size 1 added.</span>
<a name="line-8682"></a><span class='hs-definition'>expandDims</span> <span class='hs-varid'>input</span> <span class='hs-varid'>dim</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8683"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ExpandDims"</span>
<a name="line-8684"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-8685"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tdim"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tdim</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8686"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>dim</span>
<a name="line-8687"></a><span class='hs-comment'>{-
<a name="line-8688"></a>attr { name: "T" type: "type" }
<a name="line-8689"></a>attr {
<a name="line-8690"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-8691"></a>  default_value { type: DT_INT32 }
<a name="line-8692"></a>  name: "Tdim"
<a name="line-8693"></a>  type: "type"
<a name="line-8694"></a>}
<a name="line-8695"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-8696"></a>input_arg {
<a name="line-8697"></a>  description: "0-D (scalar). Specifies the dimension index at which to\nexpand the shape of `input`."
<a name="line-8698"></a>  name: "dim"
<a name="line-8699"></a>  type_attr: "Tdim"
<a name="line-8700"></a>}
<a name="line-8701"></a>output_arg {
<a name="line-8702"></a>  description: "Contains the same data as `input`, but its shape has an additional\ndimension of size 1 added."
<a name="line-8703"></a>  name: "output"
<a name="line-8704"></a>  type_attr: "T"
<a name="line-8705"></a>}
<a name="line-8706"></a>-}</span>
<a name="line-8707"></a>
<a name="line-8708"></a><span class='hs-comment'>-- | Outputs a `Summary` protocol buffer with a tensor.</span>
<a name="line-8709"></a>
<a name="line-8710"></a><a name="tensorSummary"></a><span class='hs-definition'>tensorSummary</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8711"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __tensor__: A tensor to serialize.</span>
<a name="line-8712"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __summary__</span>
<a name="line-8713"></a><span class='hs-definition'>tensorSummary</span> <span class='hs-varid'>tensor</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8714"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorSummary"</span>
<a name="line-8715"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8716"></a>        <span class='hs-varid'>tensor</span>
<a name="line-8717"></a><span class='hs-comment'>{-
<a name="line-8718"></a>attr { name: "T" type: "type" }
<a name="line-8719"></a>attr {
<a name="line-8720"></a>  default_value { s: "" }
<a name="line-8721"></a>  description: "A json-encoded SummaryDescription proto."
<a name="line-8722"></a>  name: "description"
<a name="line-8723"></a>  type: "string"
<a name="line-8724"></a>}
<a name="line-8725"></a>attr {
<a name="line-8726"></a>  default_value { list { } }
<a name="line-8727"></a>  description: "An unused list of strings."
<a name="line-8728"></a>  name: "labels"
<a name="line-8729"></a>  type: "list(string)"
<a name="line-8730"></a>}
<a name="line-8731"></a>attr {
<a name="line-8732"></a>  default_value { s: "" }
<a name="line-8733"></a>  description: "An unused string."
<a name="line-8734"></a>  name: "display_name"
<a name="line-8735"></a>  type: "string"
<a name="line-8736"></a>}
<a name="line-8737"></a>input_arg {
<a name="line-8738"></a>  description: "A tensor to serialize." name: "tensor" type_attr: "T"
<a name="line-8739"></a>}
<a name="line-8740"></a>output_arg { name: "summary" type: DT_STRING }
<a name="line-8741"></a>-}</span>
<a name="line-8742"></a>
<a name="line-8743"></a><a name="tile"></a><span class='hs-comment'>-- | Constructs a tensor by tiling a given tensor.</span>
<a name="line-8744"></a><span class='hs-comment'>--</span>
<a name="line-8745"></a><span class='hs-comment'>-- This operation creates a new tensor by replicating `input` `multiples` times.</span>
<a name="line-8746"></a><span class='hs-comment'>-- The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,</span>
<a name="line-8747"></a><span class='hs-comment'>-- and the values of `input` are replicated `multiples[i]` times along the 'i'th</span>
<a name="line-8748"></a><span class='hs-comment'>-- dimension. For example, tiling `[a b c d]` by `[2]` produces</span>
<a name="line-8749"></a><span class='hs-comment'>-- `[a b c d a b c d]`.</span>
<a name="line-8750"></a><span class='hs-definition'>tile</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tmultiples</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tmultiples</span><span class='hs-layout'>,</span>
<a name="line-8751"></a>                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-8752"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tmultiples</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8753"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 1-D or higher.</span>
<a name="line-8754"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tmultiples</span> <span class='hs-comment'>-- ^ __multiples__: 1-D. Length must be the same as the number of dimensions in `input`</span>
<a name="line-8755"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-8756"></a><span class='hs-definition'>tile</span> <span class='hs-varid'>input</span> <span class='hs-varid'>multiples</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8757"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Tile"</span>
<a name="line-8758"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-8759"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tmultiples"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tmultiples</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8760"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>multiples</span>
<a name="line-8761"></a><span class='hs-comment'>{-
<a name="line-8762"></a>attr { name: "T" type: "type" }
<a name="line-8763"></a>attr {
<a name="line-8764"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-8765"></a>  default_value { type: DT_INT32 }
<a name="line-8766"></a>  name: "Tmultiples"
<a name="line-8767"></a>  type: "type"
<a name="line-8768"></a>}
<a name="line-8769"></a>input_arg {
<a name="line-8770"></a>  description: "1-D or higher." name: "input" type_attr: "T"
<a name="line-8771"></a>}
<a name="line-8772"></a>input_arg {
<a name="line-8773"></a>  description: "1-D. Length must be the same as the number of dimensions in `input`"
<a name="line-8774"></a>  name: "multiples"
<a name="line-8775"></a>  type_attr: "Tmultiples"
<a name="line-8776"></a>}
<a name="line-8777"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-8778"></a>-}</span>
<a name="line-8779"></a>
<a name="line-8780"></a><a name="stridedSlice"></a><span class='hs-comment'>-- | Return a strided slice from `input`.</span>
<a name="line-8781"></a><span class='hs-comment'>--</span>
<a name="line-8782"></a><span class='hs-comment'>-- Note, most python users will want to use the Python `Tensor.__getitem__`</span>
<a name="line-8783"></a><span class='hs-comment'>-- or `Variable.__getitem__` rather than this op directly.</span>
<a name="line-8784"></a><span class='hs-comment'>-- </span>
<a name="line-8785"></a><span class='hs-comment'>-- The goal of this op is to produce a new tensor with a subset of</span>
<a name="line-8786"></a><span class='hs-comment'>-- the elements from the `n` dimensional `input` tensor. The subset is chosen using</span>
<a name="line-8787"></a><span class='hs-comment'>-- a sequence of `m` sparse range specifications encoded into the arguments</span>
<a name="line-8788"></a><span class='hs-comment'>-- of this function. Note, in some cases</span>
<a name="line-8789"></a><span class='hs-comment'>-- `m` could be equal to `n`, but this need not be the case. Each</span>
<a name="line-8790"></a><span class='hs-comment'>-- range specification entry can be one of the following:</span>
<a name="line-8791"></a><span class='hs-comment'>-- </span>
<a name="line-8792"></a><span class='hs-comment'>-- - An ellipsis (...). Ellipses are used to imply zero or more</span>
<a name="line-8793"></a><span class='hs-comment'>--   dimensions of full-dimension selection and are produced using</span>
<a name="line-8794"></a><span class='hs-comment'>--   `ellipsis_mask`. For example, `foo[...]` is the identity slice.</span>
<a name="line-8795"></a><span class='hs-comment'>-- </span>
<a name="line-8796"></a><span class='hs-comment'>-- - A new axis. This is used to insert a new shape=1 dimension and is</span>
<a name="line-8797"></a><span class='hs-comment'>--   produced using `new_axis_mask`. For example, `foo[:, ...]` where</span>
<a name="line-8798"></a><span class='hs-comment'>--   `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.</span>
<a name="line-8799"></a><span class='hs-comment'>-- </span>
<a name="line-8800"></a><span class='hs-comment'>-- </span>
<a name="line-8801"></a><span class='hs-comment'>-- - A range `begin:end:stride`. This is used to specify how much to choose from</span>
<a name="line-8802"></a><span class='hs-comment'>--   a given dimension. `stride` can be any integer but 0.  `begin` is an integer</span>
<a name="line-8803"></a><span class='hs-comment'>--   which represents the index of the first value to select while `end` represents</span>
<a name="line-8804"></a><span class='hs-comment'>--   the index of the last value to select. The number of values selected in each</span>
<a name="line-8805"></a><span class='hs-comment'>--   dimension is `end - begin` if `stride &gt; 0` and `begin - end` if `stride &lt; 0`.</span>
<a name="line-8806"></a><span class='hs-comment'>--   `begin` and `end` can be negative where `-1` is the last element, `-2` is</span>
<a name="line-8807"></a><span class='hs-comment'>--   the second to last. `begin_mask` controls whether to replace the explicitly</span>
<a name="line-8808"></a><span class='hs-comment'>--   given `begin` with an implicit effective value of `0` if `stride &gt; 0` and</span>
<a name="line-8809"></a><span class='hs-comment'>--   `-1` if `stride &lt; 0`. `end_mask` is analogous but produces the number</span>
<a name="line-8810"></a><span class='hs-comment'>--   required to create the largest open interval. For example, given a shape</span>
<a name="line-8811"></a><span class='hs-comment'>--   `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do</span>
<a name="line-8812"></a><span class='hs-comment'>--   not assume this is equivalent to `foo[0:-1]` which has an effective `begin`</span>
<a name="line-8813"></a><span class='hs-comment'>--   and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the</span>
<a name="line-8814"></a><span class='hs-comment'>--   first dimension of a tensor while dropping the last two (in the original</span>
<a name="line-8815"></a><span class='hs-comment'>--   order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.</span>
<a name="line-8816"></a><span class='hs-comment'>-- </span>
<a name="line-8817"></a><span class='hs-comment'>-- - A single index. This is used to keep only elements that have a given</span>
<a name="line-8818"></a><span class='hs-comment'>--   index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a</span>
<a name="line-8819"></a><span class='hs-comment'>--   shape `(6,)` tensor. This is encoded in `begin` and `end` and</span>
<a name="line-8820"></a><span class='hs-comment'>--   `shrink_axis_mask`.</span>
<a name="line-8821"></a><span class='hs-comment'>-- </span>
<a name="line-8822"></a><span class='hs-comment'>-- Each conceptual range specification is encoded in the op's argument. This</span>
<a name="line-8823"></a><span class='hs-comment'>-- encoding is best understand by considering a non-trivial example. In</span>
<a name="line-8824"></a><span class='hs-comment'>-- particular,</span>
<a name="line-8825"></a><span class='hs-comment'>-- `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as</span>
<a name="line-8826"></a><span class='hs-comment'>-- </span>
<a name="line-8827"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-8828"></a><span class='hs-comment'>-- begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)</span>
<a name="line-8829"></a><span class='hs-comment'>-- end = [2, 4, x, x, -3, x]</span>
<a name="line-8830"></a><span class='hs-comment'>-- strides = [1, 1, x, x, -1, 1]</span>
<a name="line-8831"></a><span class='hs-comment'>-- begin_mask = 1&lt;&lt;4 | 1 &lt;&lt; 5 = 48</span>
<a name="line-8832"></a><span class='hs-comment'>-- end_mask = 1&lt;&lt;5 = 32</span>
<a name="line-8833"></a><span class='hs-comment'>-- ellipsis_mask = 1&lt;&lt;3 = 8</span>
<a name="line-8834"></a><span class='hs-comment'>-- new_axis_mask = 1&lt;&lt;2 4</span>
<a name="line-8835"></a><span class='hs-comment'>-- shrink_axis_mask = 1&lt;&lt;0</span>
<a name="line-8836"></a><span class='hs-comment'>-- ```</span>
<a name="line-8837"></a><span class='hs-comment'>-- </span>
<a name="line-8838"></a><span class='hs-comment'>-- In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of</span>
<a name="line-8839"></a><span class='hs-comment'>-- the slice becomes (2, 1, 5, 5, 2, 5).</span>
<a name="line-8840"></a><span class='hs-comment'>-- Let us walk step by step through each argument specification.</span>
<a name="line-8841"></a><span class='hs-comment'>-- </span>
<a name="line-8842"></a><span class='hs-comment'>-- 1.  The first argument in the example slice is turned into `begin = 1` and</span>
<a name="line-8843"></a><span class='hs-comment'>-- `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we</span>
<a name="line-8844"></a><span class='hs-comment'>-- also set the appropriate bit in `shrink_axis_mask`.</span>
<a name="line-8845"></a><span class='hs-comment'>-- </span>
<a name="line-8846"></a><span class='hs-comment'>-- 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have</span>
<a name="line-8847"></a><span class='hs-comment'>-- zero bits contributed.</span>
<a name="line-8848"></a><span class='hs-comment'>-- </span>
<a name="line-8849"></a><span class='hs-comment'>-- 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1</span>
<a name="line-8850"></a><span class='hs-comment'>-- dimension in the final shape. Dummy values are contributed to begin,</span>
<a name="line-8851"></a><span class='hs-comment'>-- end and stride, while the new_axis_mask bit is set.</span>
<a name="line-8852"></a><span class='hs-comment'>-- </span>
<a name="line-8853"></a><span class='hs-comment'>-- 4. `...` grab the full ranges from as many dimensions as needed to</span>
<a name="line-8854"></a><span class='hs-comment'>-- fully specify a slice for every dimension of the input shape.</span>
<a name="line-8855"></a><span class='hs-comment'>-- </span>
<a name="line-8856"></a><span class='hs-comment'>-- 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated</span>
<a name="line-8857"></a><span class='hs-comment'>-- with a dimension that has shape `s` is converted to a positive index</span>
<a name="line-8858"></a><span class='hs-comment'>-- `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion</span>
<a name="line-8859"></a><span class='hs-comment'>-- is done internally so begin, end and strides receive x, -3, and -1.</span>
<a name="line-8860"></a><span class='hs-comment'>-- The appropriate begin_mask bit is set to indicate the start range is the</span>
<a name="line-8861"></a><span class='hs-comment'>-- full range (ignoring the x).</span>
<a name="line-8862"></a><span class='hs-comment'>-- </span>
<a name="line-8863"></a><span class='hs-comment'>-- 6. `:` indicates that the entire contents of the corresponding dimension</span>
<a name="line-8864"></a><span class='hs-comment'>-- is selected. This is equivalent to `::` or `0::1`. begin, end, and strides</span>
<a name="line-8865"></a><span class='hs-comment'>-- receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and</span>
<a name="line-8866"></a><span class='hs-comment'>-- `end_mask` are also set.</span>
<a name="line-8867"></a><span class='hs-comment'>-- </span>
<a name="line-8868"></a><span class='hs-comment'>-- *Requirements*:</span>
<a name="line-8869"></a><span class='hs-comment'>--   `0 != strides[i] for i in [0, m)`</span>
<a name="line-8870"></a><span class='hs-comment'>--   `ellipsis_mask must be a power of two (only one ellipsis)`</span>
<a name="line-8871"></a><span class='hs-definition'>stridedSlice</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varid'>index</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>index</span><span class='hs-layout'>,</span>
<a name="line-8872"></a>                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-8873"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>index</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8874"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-8875"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __begin__: `begin[k]` specifies the offset into the `k`th range specification.</span>
<a name="line-8876"></a>                                   <span class='hs-comment'>-- The exact dimension this corresponds to will be determined by context.</span>
<a name="line-8877"></a>                                   <span class='hs-comment'>-- Out-of-bounds values will be silently clamped. If the `k`th bit of</span>
<a name="line-8878"></a>                                   <span class='hs-comment'>-- `begin_mask` then `begin[k]` is ignored and the full range of the</span>
<a name="line-8879"></a>                                   <span class='hs-comment'>-- appropriate dimension is used instead. Negative values causes indexing</span>
<a name="line-8880"></a>                                   <span class='hs-comment'>-- to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.</span>
<a name="line-8881"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __end__: `end[i]` is like `begin` with the exception that `end_mask` is</span>
<a name="line-8882"></a>                                   <span class='hs-comment'>-- used to determine full ranges.</span>
<a name="line-8883"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __strides__: `strides[i]` specifies the increment in the `i`th specification</span>
<a name="line-8884"></a>                                   <span class='hs-comment'>-- after extracting a given element. Negative indices will reverse</span>
<a name="line-8885"></a>                                   <span class='hs-comment'>-- the original order. Out or range values are</span>
<a name="line-8886"></a>                                   <span class='hs-comment'>-- clamped to `[0,dim[i]) if slice[i]&gt;0` or `[-1,dim[i]-1] if slice[i] &lt; 0`</span>
<a name="line-8887"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-8888"></a><span class='hs-definition'>stridedSlice</span> <span class='hs-varid'>input</span> <span class='hs-varid'>begin</span> <span class='hs-varid'>end</span> <span class='hs-varid'>strides</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8889"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StridedSlice"</span>
<a name="line-8890"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-8891"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Index"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>index</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8892"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>begin</span> <span class='hs-varid'>end</span> <span class='hs-varid'>strides</span>
<a name="line-8893"></a><span class='hs-comment'>{-
<a name="line-8894"></a>attr { name: "T" type: "type" }
<a name="line-8895"></a>attr {
<a name="line-8896"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-8897"></a>  name: "Index"
<a name="line-8898"></a>  type: "type"
<a name="line-8899"></a>}
<a name="line-8900"></a>attr {
<a name="line-8901"></a>  default_value { i: 0 }
<a name="line-8902"></a>  description: "a bitmask where a bit i being 1 means to ignore the begin\nvalue and instead use the largest interval possible. At runtime\nbegin[i] will be replaced with `[0, n-1) if `stride[i] &gt; 0` or\n`[-1, n-1]` if `stride[i] &lt; 0`"
<a name="line-8903"></a>  name: "begin_mask"
<a name="line-8904"></a>  type: "int"
<a name="line-8905"></a>}
<a name="line-8906"></a>attr {
<a name="line-8907"></a>  default_value { i: 0 }
<a name="line-8908"></a>  description: "analogous to `begin_mask`"
<a name="line-8909"></a>  name: "end_mask"
<a name="line-8910"></a>  type: "int"
<a name="line-8911"></a>}
<a name="line-8912"></a>attr {
<a name="line-8913"></a>  default_value { i: 0 }
<a name="line-8914"></a>  description: "a bitmask where bit `i` being 1 means the `i`th\nposition is actually an ellipsis. One bit at most can be 1.\nIf `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 &lt;&lt; (m+1)`\nis provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis\nimplicitly creates as many range specifications as necessary to fully\nspecify the sliced range for every dimension. For example for a 4-dimensional\ntensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`."
<a name="line-8915"></a>  name: "ellipsis_mask"
<a name="line-8916"></a>  type: "int"
<a name="line-8917"></a>}
<a name="line-8918"></a>attr {
<a name="line-8919"></a>  default_value { i: 0 }
<a name="line-8920"></a>  description: "a bitmask where bit `i` being 1 means the `i`th\nspecification creates a new shape 1 dimension. For example\n`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor."
<a name="line-8921"></a>  name: "new_axis_mask"
<a name="line-8922"></a>  type: "int"
<a name="line-8923"></a>}
<a name="line-8924"></a>attr {
<a name="line-8925"></a>  default_value { i: 0 }
<a name="line-8926"></a>  description: "a bitmask where bit `i` implies that the `i`th\nspecification should shrink the dimensionality. begin and end\nmust imply a slice of size 1 in the dimension. For example in\npython one might do `foo[:, 3, :]` which would result in\n`shrink_axis_mask` being 2."
<a name="line-8927"></a>  name: "shrink_axis_mask"
<a name="line-8928"></a>  type: "int"
<a name="line-8929"></a>}
<a name="line-8930"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-8931"></a>input_arg {
<a name="line-8932"></a>  description: "`begin[k]` specifies the offset into the `k`th range specification.\nThe exact dimension this corresponds to will be determined by context.\nOut-of-bounds values will be silently clamped. If the `k`th bit of\n`begin_mask` then `begin[k]` is ignored and the full range of the\nappropriate dimension is used instead. Negative values causes indexing\nto start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`."
<a name="line-8933"></a>  name: "begin"
<a name="line-8934"></a>  type_attr: "Index"
<a name="line-8935"></a>}
<a name="line-8936"></a>input_arg {
<a name="line-8937"></a>  description: "`end[i]` is like `begin` with the exception that `end_mask` is\nused to determine full ranges."
<a name="line-8938"></a>  name: "end"
<a name="line-8939"></a>  type_attr: "Index"
<a name="line-8940"></a>}
<a name="line-8941"></a>input_arg {
<a name="line-8942"></a>  description: "`strides[i]` specifies the increment in the `i`th specification\nafter extracting a given element. Negative indices will reverse\nthe original order. Out or range values are\nclamped to `[0,dim[i]) if slice[i]&gt;0` or `[-1,dim[i]-1] if slice[i] &lt; 0`"
<a name="line-8943"></a>  name: "strides"
<a name="line-8944"></a>  type_attr: "Index"
<a name="line-8945"></a>}
<a name="line-8946"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-8947"></a>-}</span>
<a name="line-8948"></a>
<a name="line-8949"></a><a name="slice"></a><span class='hs-comment'>-- | Return a slice from 'input'.</span>
<a name="line-8950"></a><span class='hs-comment'>--</span>
<a name="line-8951"></a><span class='hs-comment'>-- The output tensor is a tensor with dimensions described by 'size'</span>
<a name="line-8952"></a><span class='hs-comment'>-- whose values are extracted from 'input' starting at the offsets in</span>
<a name="line-8953"></a><span class='hs-comment'>-- 'begin'.</span>
<a name="line-8954"></a><span class='hs-comment'>-- </span>
<a name="line-8955"></a><span class='hs-comment'>-- *Requirements*:</span>
<a name="line-8956"></a><span class='hs-comment'>--   0 &lt;= begin[i] &lt;= begin[i] + size[i] &lt;= Di  for i in [0, n)</span>
<a name="line-8957"></a><span class='hs-definition'>slice</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>index</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>index</span><span class='hs-layout'>,</span>
<a name="line-8958"></a>                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-8959"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>index</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-8960"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-8961"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __begin__: begin[i] specifies the offset into the 'i'th dimension of</span>
<a name="line-8962"></a>                            <span class='hs-comment'>-- 'input' to slice from.</span>
<a name="line-8963"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __size__: size[i] specifies the number of elements of the 'i'th dimension</span>
<a name="line-8964"></a>                            <span class='hs-comment'>-- of 'input' to slice. If size[i] is -1, all remaining elements in dimension</span>
<a name="line-8965"></a>                            <span class='hs-comment'>-- i are included in the slice (i.e. this is equivalent to setting</span>
<a name="line-8966"></a>                            <span class='hs-comment'>-- size[i] = input.dim_size(i) - begin[i]).</span>
<a name="line-8967"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-8968"></a><span class='hs-definition'>slice</span> <span class='hs-varid'>input</span> <span class='hs-varid'>begin</span> <span class='hs-varid'>size</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-8969"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Slice"</span>
<a name="line-8970"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-8971"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Index"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>index</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-8972"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>begin</span> <span class='hs-varid'>size</span>
<a name="line-8973"></a><span class='hs-comment'>{-
<a name="line-8974"></a>attr { name: "T" type: "type" }
<a name="line-8975"></a>attr {
<a name="line-8976"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-8977"></a>  name: "Index"
<a name="line-8978"></a>  type: "type"
<a name="line-8979"></a>}
<a name="line-8980"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-8981"></a>input_arg {
<a name="line-8982"></a>  description: "begin[i] specifies the offset into the \'i\'th dimension of\n\'input\' to slice from."
<a name="line-8983"></a>  name: "begin"
<a name="line-8984"></a>  type_attr: "Index"
<a name="line-8985"></a>}
<a name="line-8986"></a>input_arg {
<a name="line-8987"></a>  description: "size[i] specifies the number of elements of the \'i\'th dimension\nof \'input\' to slice. If size[i] is -1, all remaining elements in dimension\ni are included in the slice (i.e. this is equivalent to setting\nsize[i] = input.dim_size(i) - begin[i])."
<a name="line-8988"></a>  name: "size"
<a name="line-8989"></a>  type_attr: "Index"
<a name="line-8990"></a>}
<a name="line-8991"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-8992"></a>-}</span>
<a name="line-8993"></a>
<a name="line-8994"></a><a name="quantizedConv2D"></a><span class='hs-comment'>-- | Computes a 2D convolution given quantized 4D input and filter tensors.</span>
<a name="line-8995"></a><span class='hs-comment'>--</span>
<a name="line-8996"></a><span class='hs-comment'>-- The inputs are quantized tensors where the lowest value represents the real</span>
<a name="line-8997"></a><span class='hs-comment'>-- number of the associated minimum, and the highest represents the maximum.</span>
<a name="line-8998"></a><span class='hs-comment'>-- This means that you can only interpret the quantized output in the same way, by</span>
<a name="line-8999"></a><span class='hs-comment'>-- taking the returned minimum and maximum values into account.</span>
<a name="line-9000"></a><span class='hs-definition'>quantizedConv2D</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>tinput</span> <span class='hs-varid'>tfilter</span>
<a name="line-9001"></a>                   <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-9002"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9003"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-9004"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-9005"></a>                               <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tfilter</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-9006"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9007"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-9008"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tfilter</span><span class='hs-layout'>,</span>
<a name="line-9009"></a>                               <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-9010"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9011"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-9012"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9013"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-9014"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tfilter</span> <span class='hs-comment'>-- ^ __filter__: filter's input_depth dimension must match input's depth dimensions.</span>
<a name="line-9015"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_input__: The float value that the lowest quantized input value represents.</span>
<a name="line-9016"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_input__: The float value that the highest quantized input value represents.</span>
<a name="line-9017"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_filter__: The float value that the lowest quantized filter value represents.</span>
<a name="line-9018"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_filter__: The float value that the highest quantized filter value represents.</span>
<a name="line-9019"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-9020"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-9021"></a>                   <span class='hs-comment'>-- ^ (__output__, __min_output__, __max_output__)</span>
<a name="line-9022"></a>                   <span class='hs-comment'>--</span>
<a name="line-9023"></a>                   <span class='hs-comment'>-- * __output__</span>
<a name="line-9024"></a>                   <span class='hs-comment'>--</span>
<a name="line-9025"></a>                   <span class='hs-comment'>-- * __min_output__: The float value that the lowest quantized output value represents.</span>
<a name="line-9026"></a>                   <span class='hs-comment'>--</span>
<a name="line-9027"></a>                   <span class='hs-comment'>-- * __max_output__: The float value that the highest quantized output value represents.</span>
<a name="line-9028"></a><span class='hs-definition'>quantizedConv2D</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>min_input</span> <span class='hs-varid'>max_input</span> <span class='hs-varid'>min_filter</span>
<a name="line-9029"></a>                <span class='hs-varid'>max_filter</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9030"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedConv2D"</span>
<a name="line-9031"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tinput"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>)</span>
<a name="line-9032"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tfilter"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tfilter</span><span class='hs-layout'>)</span>
<a name="line-9033"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9034"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>min_input</span> <span class='hs-varid'>max_input</span> <span class='hs-varid'>min_filter</span> <span class='hs-varid'>max_filter</span>
<a name="line-9035"></a><span class='hs-comment'>{-
<a name="line-9036"></a>attr {
<a name="line-9037"></a>  allowed_values {
<a name="line-9038"></a>    list {
<a name="line-9039"></a>      type: DT_QINT8
<a name="line-9040"></a>      type: DT_QUINT8
<a name="line-9041"></a>      type: DT_QINT16
<a name="line-9042"></a>      type: DT_QUINT16
<a name="line-9043"></a>      type: DT_QINT32
<a name="line-9044"></a>    }
<a name="line-9045"></a>  }
<a name="line-9046"></a>  name: "Tinput"
<a name="line-9047"></a>  type: "type"
<a name="line-9048"></a>}
<a name="line-9049"></a>attr {
<a name="line-9050"></a>  allowed_values {
<a name="line-9051"></a>    list {
<a name="line-9052"></a>      type: DT_QINT8
<a name="line-9053"></a>      type: DT_QUINT8
<a name="line-9054"></a>      type: DT_QINT16
<a name="line-9055"></a>      type: DT_QUINT16
<a name="line-9056"></a>      type: DT_QINT32
<a name="line-9057"></a>    }
<a name="line-9058"></a>  }
<a name="line-9059"></a>  name: "Tfilter"
<a name="line-9060"></a>  type: "type"
<a name="line-9061"></a>}
<a name="line-9062"></a>attr {
<a name="line-9063"></a>  allowed_values {
<a name="line-9064"></a>    list {
<a name="line-9065"></a>      type: DT_QINT8
<a name="line-9066"></a>      type: DT_QUINT8
<a name="line-9067"></a>      type: DT_QINT16
<a name="line-9068"></a>      type: DT_QUINT16
<a name="line-9069"></a>      type: DT_QINT32
<a name="line-9070"></a>    }
<a name="line-9071"></a>  }
<a name="line-9072"></a>  default_value { type: DT_QINT32 }
<a name="line-9073"></a>  name: "out_type"
<a name="line-9074"></a>  type: "type"
<a name="line-9075"></a>}
<a name="line-9076"></a>attr {
<a name="line-9077"></a>  description: "The stride of the sliding window for each dimension of the input\ntensor."
<a name="line-9078"></a>  name: "strides"
<a name="line-9079"></a>  type: "list(int)"
<a name="line-9080"></a>}
<a name="line-9081"></a>attr {
<a name="line-9082"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-9083"></a>  description: "The type of padding algorithm to use."
<a name="line-9084"></a>  name: "padding"
<a name="line-9085"></a>  type: "string"
<a name="line-9086"></a>}
<a name="line-9087"></a>input_arg { name: "input" type_attr: "Tinput" }
<a name="line-9088"></a>input_arg {
<a name="line-9089"></a>  description: "filter\'s input_depth dimension must match input\'s depth dimensions."
<a name="line-9090"></a>  name: "filter"
<a name="line-9091"></a>  type_attr: "Tfilter"
<a name="line-9092"></a>}
<a name="line-9093"></a>input_arg {
<a name="line-9094"></a>  description: "The float value that the lowest quantized input value represents."
<a name="line-9095"></a>  name: "min_input"
<a name="line-9096"></a>  type: DT_FLOAT
<a name="line-9097"></a>}
<a name="line-9098"></a>input_arg {
<a name="line-9099"></a>  description: "The float value that the highest quantized input value represents."
<a name="line-9100"></a>  name: "max_input"
<a name="line-9101"></a>  type: DT_FLOAT
<a name="line-9102"></a>}
<a name="line-9103"></a>input_arg {
<a name="line-9104"></a>  description: "The float value that the lowest quantized filter value represents."
<a name="line-9105"></a>  name: "min_filter"
<a name="line-9106"></a>  type: DT_FLOAT
<a name="line-9107"></a>}
<a name="line-9108"></a>input_arg {
<a name="line-9109"></a>  description: "The float value that the highest quantized filter value represents."
<a name="line-9110"></a>  name: "max_filter"
<a name="line-9111"></a>  type: DT_FLOAT
<a name="line-9112"></a>}
<a name="line-9113"></a>output_arg { name: "output" type_attr: "out_type" }
<a name="line-9114"></a>output_arg {
<a name="line-9115"></a>  description: "The float value that the lowest quantized output value represents."
<a name="line-9116"></a>  name: "min_output"
<a name="line-9117"></a>  type: DT_FLOAT
<a name="line-9118"></a>}
<a name="line-9119"></a>output_arg {
<a name="line-9120"></a>  description: "The float value that the highest quantized output value represents."
<a name="line-9121"></a>  name: "max_output"
<a name="line-9122"></a>  type: DT_FLOAT
<a name="line-9123"></a>}
<a name="line-9124"></a>-}</span>
<a name="line-9125"></a>
<a name="line-9126"></a><span class='hs-comment'>-- | Computes rectified linear 6 gradients for a Relu6 operation.</span>
<a name="line-9127"></a>
<a name="line-9128"></a><a name="relu6Grad"></a><span class='hs-definition'>relu6Grad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-9129"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9130"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-9131"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-9132"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-9133"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-9134"></a>                                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9135"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gradients__: The backpropagated gradients to the corresponding Relu6 operation.</span>
<a name="line-9136"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__: The features passed as input to the corresponding Relu6 operation.</span>
<a name="line-9137"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __backprops__: The gradients:</span>
<a name="line-9138"></a>             <span class='hs-comment'>-- `gradients * (features &gt; 0) * (features &lt; 6)`.</span>
<a name="line-9139"></a><span class='hs-definition'>relu6Grad</span> <span class='hs-varid'>gradients</span> <span class='hs-varid'>features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9140"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Relu6Grad"</span>
<a name="line-9141"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9142"></a>        <span class='hs-varid'>gradients</span> <span class='hs-varid'>features</span>
<a name="line-9143"></a><span class='hs-comment'>{-
<a name="line-9144"></a>attr {
<a name="line-9145"></a>  allowed_values {
<a name="line-9146"></a>    list {
<a name="line-9147"></a>      type: DT_FLOAT
<a name="line-9148"></a>      type: DT_DOUBLE
<a name="line-9149"></a>      type: DT_INT32
<a name="line-9150"></a>      type: DT_INT64
<a name="line-9151"></a>      type: DT_UINT8
<a name="line-9152"></a>      type: DT_INT16
<a name="line-9153"></a>      type: DT_INT8
<a name="line-9154"></a>      type: DT_UINT16
<a name="line-9155"></a>      type: DT_HALF
<a name="line-9156"></a>    }
<a name="line-9157"></a>  }
<a name="line-9158"></a>  name: "T"
<a name="line-9159"></a>  type: "type"
<a name="line-9160"></a>}
<a name="line-9161"></a>input_arg {
<a name="line-9162"></a>  description: "The backpropagated gradients to the corresponding Relu6 operation."
<a name="line-9163"></a>  name: "gradients"
<a name="line-9164"></a>  type_attr: "T"
<a name="line-9165"></a>}
<a name="line-9166"></a>input_arg {
<a name="line-9167"></a>  description: "The features passed as input to the corresponding Relu6 operation."
<a name="line-9168"></a>  name: "features"
<a name="line-9169"></a>  type_attr: "T"
<a name="line-9170"></a>}
<a name="line-9171"></a>output_arg {
<a name="line-9172"></a>  description: "The gradients:\n`gradients * (features &gt; 0) * (features &lt; 6)`."
<a name="line-9173"></a>  name: "backprops"
<a name="line-9174"></a>  type_attr: "T"
<a name="line-9175"></a>}
<a name="line-9176"></a>-}</span>
<a name="line-9177"></a>
<a name="line-9178"></a><span class='hs-comment'>-- | Computes gradients of the average pooling function.</span>
<a name="line-9179"></a>
<a name="line-9180"></a><a name="avgPoolGrad"></a><span class='hs-definition'>avgPoolGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-9181"></a>                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9182"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __orig_input_shape__: 1-D.  Shape of the original input to `avg_pool`.</span>
<a name="line-9183"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.</span>
<a name="line-9184"></a>                              <span class='hs-comment'>-- the output of `avg_pool`.</span>
<a name="line-9185"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D.  Gradients w.r.t. the input of `avg_pool`.</span>
<a name="line-9186"></a><span class='hs-definition'>avgPoolGrad</span> <span class='hs-varid'>orig_input_shape</span> <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9187"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AvgPoolGrad"</span>
<a name="line-9188"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9189"></a>        <span class='hs-varid'>orig_input_shape</span> <span class='hs-varid'>grad</span>
<a name="line-9190"></a><span class='hs-comment'>{-
<a name="line-9191"></a>attr {
<a name="line-9192"></a>  description: "The size of the sliding window for each dimension of the input."
<a name="line-9193"></a>  has_minimum: true
<a name="line-9194"></a>  minimum: 4
<a name="line-9195"></a>  name: "ksize"
<a name="line-9196"></a>  type: "list(int)"
<a name="line-9197"></a>}
<a name="line-9198"></a>attr {
<a name="line-9199"></a>  description: "The stride of the sliding window for each dimension of the input."
<a name="line-9200"></a>  has_minimum: true
<a name="line-9201"></a>  minimum: 4
<a name="line-9202"></a>  name: "strides"
<a name="line-9203"></a>  type: "list(int)"
<a name="line-9204"></a>}
<a name="line-9205"></a>attr {
<a name="line-9206"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-9207"></a>  description: "The type of padding algorithm to use."
<a name="line-9208"></a>  name: "padding"
<a name="line-9209"></a>  type: "string"
<a name="line-9210"></a>}
<a name="line-9211"></a>attr {
<a name="line-9212"></a>  allowed_values { list { s: "NHWC" s: "NCHW" } }
<a name="line-9213"></a>  default_value { s: "NHWC" }
<a name="line-9214"></a>  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
<a name="line-9215"></a>  name: "data_format"
<a name="line-9216"></a>  type: "string"
<a name="line-9217"></a>}
<a name="line-9218"></a>attr {
<a name="line-9219"></a>  allowed_values {
<a name="line-9220"></a>    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
<a name="line-9221"></a>  }
<a name="line-9222"></a>  name: "T"
<a name="line-9223"></a>  type: "type"
<a name="line-9224"></a>}
<a name="line-9225"></a>input_arg {
<a name="line-9226"></a>  description: "1-D.  Shape of the original input to `avg_pool`."
<a name="line-9227"></a>  name: "orig_input_shape"
<a name="line-9228"></a>  type: DT_INT32
<a name="line-9229"></a>}
<a name="line-9230"></a>input_arg {
<a name="line-9231"></a>  description: "4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.\nthe output of `avg_pool`."
<a name="line-9232"></a>  name: "grad"
<a name="line-9233"></a>  type_attr: "T"
<a name="line-9234"></a>}
<a name="line-9235"></a>output_arg {
<a name="line-9236"></a>  description: "4-D.  Gradients w.r.t. the input of `avg_pool`."
<a name="line-9237"></a>  name: "output"
<a name="line-9238"></a>  type_attr: "T"
<a name="line-9239"></a>}
<a name="line-9240"></a>-}</span>
<a name="line-9241"></a>
<a name="line-9242"></a><a name="stringSplit"></a><span class='hs-comment'>-- | Split elements of `input` based on `delimiter` into a `SparseTensor`.</span>
<a name="line-9243"></a><span class='hs-comment'>--</span>
<a name="line-9244"></a><span class='hs-comment'>-- Let N be the size of source (typically N will be the batch size). Split each</span>
<a name="line-9245"></a><span class='hs-comment'>-- element of `input` based on `delimiter` and return a `SparseTensor`</span>
<a name="line-9246"></a><span class='hs-comment'>-- containing the splitted tokens. Empty tokens are ignored.</span>
<a name="line-9247"></a><span class='hs-comment'>-- </span>
<a name="line-9248"></a><span class='hs-comment'>-- `delimiter` can be empty or a single-byte character. If `delimiter` is an empty</span>
<a name="line-9249"></a><span class='hs-comment'>--  string, each element of `input` is split into individual single-byte character</span>
<a name="line-9250"></a><span class='hs-comment'>--  strings, including splitting of UTF-8 multibyte sequences.</span>
<a name="line-9251"></a><span class='hs-comment'>-- </span>
<a name="line-9252"></a><span class='hs-comment'>-- For example:</span>
<a name="line-9253"></a><span class='hs-comment'>--   N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output</span>
<a name="line-9254"></a><span class='hs-comment'>--   will be</span>
<a name="line-9255"></a><span class='hs-comment'>-- </span>
<a name="line-9256"></a><span class='hs-comment'>--   indices = [0, 0;</span>
<a name="line-9257"></a><span class='hs-comment'>--              0, 1;</span>
<a name="line-9258"></a><span class='hs-comment'>--              1, 0;</span>
<a name="line-9259"></a><span class='hs-comment'>--              1, 1;</span>
<a name="line-9260"></a><span class='hs-comment'>--              1, 2]</span>
<a name="line-9261"></a><span class='hs-comment'>--   shape = [2, 3]</span>
<a name="line-9262"></a><span class='hs-comment'>--   values = ['hello', 'world', 'a', 'b', 'c']</span>
<a name="line-9263"></a><span class='hs-definition'>stringSplit</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __input__: 1-D. Strings to split.</span>
<a name="line-9264"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __delimiter__: 0-D. Delimiter character, or empty string.</span>
<a name="line-9265"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-9266"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>,</span>
<a name="line-9267"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span>
<a name="line-9268"></a>               <span class='hs-comment'>-- ^ (__indices__, __values__, __shape__)</span>
<a name="line-9269"></a>               <span class='hs-comment'>--</span>
<a name="line-9270"></a>               <span class='hs-comment'>-- * __indices__: A dense matrix of int64 representing the indices of the sparse tensor.</span>
<a name="line-9271"></a>               <span class='hs-comment'>--</span>
<a name="line-9272"></a>               <span class='hs-comment'>-- * __values__: A vector of strings corresponding to the splited values.</span>
<a name="line-9273"></a>               <span class='hs-comment'>--</span>
<a name="line-9274"></a>               <span class='hs-comment'>-- * __shape__: a length-2 vector of int64 representing the shape of the sparse</span>
<a name="line-9275"></a>               <span class='hs-comment'>-- tensor, where the first value is N and the second value is the maximum number</span>
<a name="line-9276"></a>               <span class='hs-comment'>-- of tokens in a single input entry.</span>
<a name="line-9277"></a><span class='hs-definition'>stringSplit</span> <span class='hs-varid'>input</span> <span class='hs-varid'>delimiter</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9278"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StringSplit"</span><span class='hs-layout'>)</span>
<a name="line-9279"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>delimiter</span>
<a name="line-9280"></a><span class='hs-comment'>{-
<a name="line-9281"></a>input_arg {
<a name="line-9282"></a>  description: "1-D. Strings to split." name: "input" type: DT_STRING
<a name="line-9283"></a>}
<a name="line-9284"></a>input_arg {
<a name="line-9285"></a>  description: "0-D. Delimiter character, or empty string."
<a name="line-9286"></a>  name: "delimiter"
<a name="line-9287"></a>  type: DT_STRING
<a name="line-9288"></a>}
<a name="line-9289"></a>output_arg {
<a name="line-9290"></a>  description: "A dense matrix of int64 representing the indices of the sparse tensor."
<a name="line-9291"></a>  name: "indices"
<a name="line-9292"></a>  type: DT_INT64
<a name="line-9293"></a>}
<a name="line-9294"></a>output_arg {
<a name="line-9295"></a>  description: "A vector of strings corresponding to the splited values."
<a name="line-9296"></a>  name: "values"
<a name="line-9297"></a>  type: DT_STRING
<a name="line-9298"></a>}
<a name="line-9299"></a>output_arg {
<a name="line-9300"></a>  description: "a length-2 vector of int64 representing the shape of the sparse\ntensor, where the first value is N and the second value is the maximum number\nof tokens in a single input entry."
<a name="line-9301"></a>  name: "shape"
<a name="line-9302"></a>  type: DT_INT64
<a name="line-9303"></a>}
<a name="line-9304"></a>-}</span>
<a name="line-9305"></a>
<a name="line-9306"></a><a name="rank"></a><span class='hs-comment'>-- | Returns the rank of a tensor.</span>
<a name="line-9307"></a><span class='hs-comment'>--</span>
<a name="line-9308"></a><span class='hs-comment'>-- This operation returns an integer representing the rank of `input`.</span>
<a name="line-9309"></a><span class='hs-comment'>-- </span>
<a name="line-9310"></a><span class='hs-comment'>-- For example:</span>
<a name="line-9311"></a><span class='hs-comment'>-- </span>
<a name="line-9312"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-9313"></a><span class='hs-comment'>-- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]</span>
<a name="line-9314"></a><span class='hs-comment'>-- # shape of tensor 't' is [2, 2, 3]</span>
<a name="line-9315"></a><span class='hs-comment'>-- rank(t) ==&gt; 3</span>
<a name="line-9316"></a><span class='hs-comment'>-- ```</span>
<a name="line-9317"></a><span class='hs-comment'>-- </span>
<a name="line-9318"></a><span class='hs-comment'>-- **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank</span>
<a name="line-9319"></a><span class='hs-comment'>-- of a tensor is the number of indices required to uniquely select each element</span>
<a name="line-9320"></a><span class='hs-comment'>-- of the tensor. Rank is also known as "order", "degree", or "ndims."</span>
<a name="line-9321"></a><span class='hs-definition'>rank</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-9322"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-9323"></a><span class='hs-definition'>rank</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9324"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Rank"</span>
<a name="line-9325"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9326"></a>        <span class='hs-varid'>input</span>
<a name="line-9327"></a><span class='hs-comment'>{-
<a name="line-9328"></a>attr { name: "T" type: "type" }
<a name="line-9329"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-9330"></a>output_arg { name: "output" type: DT_INT32 }
<a name="line-9331"></a>-}</span>
<a name="line-9332"></a>
<a name="line-9333"></a><a name="reciprocal"></a><span class='hs-comment'>-- | Computes the reciprocal of x element-wise.</span>
<a name="line-9334"></a><span class='hs-comment'>--</span>
<a name="line-9335"></a><span class='hs-comment'>-- I.e., \\(y = 1 / x\\).</span>
<a name="line-9336"></a><span class='hs-definition'>reciprocal</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-9337"></a>                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-9338"></a>                                     <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-9339"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-9340"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9341"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-9342"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-9343"></a><span class='hs-definition'>reciprocal</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9344"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Reciprocal"</span>
<a name="line-9345"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9346"></a>        <span class='hs-varid'>x</span>
<a name="line-9347"></a><span class='hs-comment'>{-
<a name="line-9348"></a>attr {
<a name="line-9349"></a>  allowed_values {
<a name="line-9350"></a>    list {
<a name="line-9351"></a>      type: DT_HALF
<a name="line-9352"></a>      type: DT_FLOAT
<a name="line-9353"></a>      type: DT_DOUBLE
<a name="line-9354"></a>      type: DT_INT32
<a name="line-9355"></a>      type: DT_INT64
<a name="line-9356"></a>      type: DT_COMPLEX64
<a name="line-9357"></a>      type: DT_COMPLEX128
<a name="line-9358"></a>    }
<a name="line-9359"></a>  }
<a name="line-9360"></a>  name: "T"
<a name="line-9361"></a>  type: "type"
<a name="line-9362"></a>}
<a name="line-9363"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-9364"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-9365"></a>-}</span>
<a name="line-9366"></a>
<a name="line-9367"></a><a name="reverseSequence"></a><span class='hs-comment'>-- | Reverses variable length slices.</span>
<a name="line-9368"></a><span class='hs-comment'>--</span>
<a name="line-9369"></a><span class='hs-comment'>-- This op first slices `input` along the dimension `batch_dim`, and for each</span>
<a name="line-9370"></a><span class='hs-comment'>-- slice `i`, reverses the first `seq_lengths[i]` elements along</span>
<a name="line-9371"></a><span class='hs-comment'>-- the dimension `seq_dim`.</span>
<a name="line-9372"></a><span class='hs-comment'>-- </span>
<a name="line-9373"></a><span class='hs-comment'>-- The elements of `seq_lengths` must obey `seq_lengths[i] &lt; input.dims[seq_dim]`,</span>
<a name="line-9374"></a><span class='hs-comment'>-- and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.</span>
<a name="line-9375"></a><span class='hs-comment'>-- </span>
<a name="line-9376"></a><span class='hs-comment'>-- The output slice `i` along dimension `batch_dim` is then given by input</span>
<a name="line-9377"></a><span class='hs-comment'>-- slice `i`, with the first `seq_lengths[i]` slices along dimension</span>
<a name="line-9378"></a><span class='hs-comment'>-- `seq_dim` reversed.</span>
<a name="line-9379"></a><span class='hs-comment'>-- </span>
<a name="line-9380"></a><span class='hs-comment'>-- For example:</span>
<a name="line-9381"></a><span class='hs-comment'>-- </span>
<a name="line-9382"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-9383"></a><span class='hs-comment'>-- # Given this:</span>
<a name="line-9384"></a><span class='hs-comment'>-- batch_dim = 0</span>
<a name="line-9385"></a><span class='hs-comment'>-- seq_dim = 1</span>
<a name="line-9386"></a><span class='hs-comment'>-- input.dims = (4, 8, ...)</span>
<a name="line-9387"></a><span class='hs-comment'>-- seq_lengths = [7, 2, 3, 5]</span>
<a name="line-9388"></a><span class='hs-comment'>-- </span>
<a name="line-9389"></a><span class='hs-comment'>-- # then slices of input are reversed on seq_dim, but only up to seq_lengths:</span>
<a name="line-9390"></a><span class='hs-comment'>-- output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]</span>
<a name="line-9391"></a><span class='hs-comment'>-- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]</span>
<a name="line-9392"></a><span class='hs-comment'>-- output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]</span>
<a name="line-9393"></a><span class='hs-comment'>-- output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]</span>
<a name="line-9394"></a><span class='hs-comment'>-- </span>
<a name="line-9395"></a><span class='hs-comment'>-- # while entries past seq_lens are copied through:</span>
<a name="line-9396"></a><span class='hs-comment'>-- output[0, 7:, :, ...] = input[0, 7:, :, ...]</span>
<a name="line-9397"></a><span class='hs-comment'>-- output[1, 2:, :, ...] = input[1, 2:, :, ...]</span>
<a name="line-9398"></a><span class='hs-comment'>-- output[2, 3:, :, ...] = input[2, 3:, :, ...]</span>
<a name="line-9399"></a><span class='hs-comment'>-- output[3, 2:, :, ...] = input[3, 2:, :, ...]</span>
<a name="line-9400"></a><span class='hs-comment'>-- ```</span>
<a name="line-9401"></a><span class='hs-comment'>-- </span>
<a name="line-9402"></a><span class='hs-comment'>-- In contrast, if:</span>
<a name="line-9403"></a><span class='hs-comment'>-- </span>
<a name="line-9404"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-9405"></a><span class='hs-comment'>-- # Given this:</span>
<a name="line-9406"></a><span class='hs-comment'>-- batch_dim = 2</span>
<a name="line-9407"></a><span class='hs-comment'>-- seq_dim = 0</span>
<a name="line-9408"></a><span class='hs-comment'>-- input.dims = (8, ?, 4, ...)</span>
<a name="line-9409"></a><span class='hs-comment'>-- seq_lengths = [7, 2, 3, 5]</span>
<a name="line-9410"></a><span class='hs-comment'>-- </span>
<a name="line-9411"></a><span class='hs-comment'>-- # then slices of input are reversed on seq_dim, but only up to seq_lengths:</span>
<a name="line-9412"></a><span class='hs-comment'>-- output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]</span>
<a name="line-9413"></a><span class='hs-comment'>-- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]</span>
<a name="line-9414"></a><span class='hs-comment'>-- output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]</span>
<a name="line-9415"></a><span class='hs-comment'>-- output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]</span>
<a name="line-9416"></a><span class='hs-comment'>-- </span>
<a name="line-9417"></a><span class='hs-comment'>-- # while entries past seq_lens are copied through:</span>
<a name="line-9418"></a><span class='hs-comment'>-- output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]</span>
<a name="line-9419"></a><span class='hs-comment'>-- output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]</span>
<a name="line-9420"></a><span class='hs-comment'>-- output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]</span>
<a name="line-9421"></a><span class='hs-comment'>-- output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]</span>
<a name="line-9422"></a><span class='hs-comment'>-- ```</span>
<a name="line-9423"></a><span class='hs-definition'>reverseSequence</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tlen</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tlen</span><span class='hs-layout'>,</span>
<a name="line-9424"></a>                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9425"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tlen</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9426"></a>                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __seq_dim__: The dimension which is partially reversed.</span>
<a name="line-9427"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The input to reverse.</span>
<a name="line-9428"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tlen</span> <span class='hs-comment'>-- ^ __seq_lengths__: 1-D with length `input.dims(batch_dim)` and</span>
<a name="line-9429"></a>                                     <span class='hs-comment'>-- `max(seq_lengths) &lt; input.dims(seq_dim)`</span>
<a name="line-9430"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The partially reversed input. It has the same shape as `input`.</span>
<a name="line-9431"></a><span class='hs-definition'>reverseSequence</span> <span class='hs-varid'>seq_dim</span> <span class='hs-varid'>input</span> <span class='hs-varid'>seq_lengths</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9432"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReverseSequence"</span>
<a name="line-9433"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-9434"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tlen"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tlen</span><span class='hs-layout'>)</span>
<a name="line-9435"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"seq_dim"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>seq_dim</span><span class='hs-layout'>)</span>
<a name="line-9436"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>seq_lengths</span>
<a name="line-9437"></a><span class='hs-comment'>{-
<a name="line-9438"></a>attr {
<a name="line-9439"></a>  description: "The dimension which is partially reversed."
<a name="line-9440"></a>  name: "seq_dim"
<a name="line-9441"></a>  type: "int"
<a name="line-9442"></a>}
<a name="line-9443"></a>attr {
<a name="line-9444"></a>  default_value { i: 0 }
<a name="line-9445"></a>  description: "The dimension along which reversal is performed."
<a name="line-9446"></a>  name: "batch_dim"
<a name="line-9447"></a>  type: "int"
<a name="line-9448"></a>}
<a name="line-9449"></a>attr { name: "T" type: "type" }
<a name="line-9450"></a>attr {
<a name="line-9451"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-9452"></a>  default_value { type: DT_INT64 }
<a name="line-9453"></a>  name: "Tlen"
<a name="line-9454"></a>  type: "type"
<a name="line-9455"></a>}
<a name="line-9456"></a>input_arg {
<a name="line-9457"></a>  description: "The input to reverse." name: "input" type_attr: "T"
<a name="line-9458"></a>}
<a name="line-9459"></a>input_arg {
<a name="line-9460"></a>  description: "1-D with length `input.dims(batch_dim)` and\n`max(seq_lengths) &lt; input.dims(seq_dim)`"
<a name="line-9461"></a>  name: "seq_lengths"
<a name="line-9462"></a>  type_attr: "Tlen"
<a name="line-9463"></a>}
<a name="line-9464"></a>output_arg {
<a name="line-9465"></a>  description: "The partially reversed input. It has the same shape as `input`."
<a name="line-9466"></a>  name: "output"
<a name="line-9467"></a>  type_attr: "T"
<a name="line-9468"></a>}
<a name="line-9469"></a>-}</span>
<a name="line-9470"></a>
<a name="line-9471"></a><a name="biasAddGrad"></a><span class='hs-comment'>-- | The backward operation for "BiasAdd" on the "bias" tensor.</span>
<a name="line-9472"></a><span class='hs-comment'>--</span>
<a name="line-9473"></a><span class='hs-comment'>-- It accumulates all the values from out_backprop into the feature dimension.</span>
<a name="line-9474"></a><span class='hs-comment'>-- For NHWC data format, the feature dimension is the last. For NCHW data format,</span>
<a name="line-9475"></a><span class='hs-comment'>-- the feature dimension is the third-to-last.</span>
<a name="line-9476"></a><span class='hs-definition'>biasAddGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-9477"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-9478"></a>                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-9479"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9480"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-9481"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-9482"></a>                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9483"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: Any number of dimensions.</span>
<a name="line-9484"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 1-D with size the feature dimension of `out_backprop`.</span>
<a name="line-9485"></a><span class='hs-definition'>biasAddGrad</span> <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9486"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BiasAddGrad"</span>
<a name="line-9487"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9488"></a>        <span class='hs-varid'>out_backprop</span>
<a name="line-9489"></a><span class='hs-comment'>{-
<a name="line-9490"></a>attr {
<a name="line-9491"></a>  allowed_values {
<a name="line-9492"></a>    list {
<a name="line-9493"></a>      type: DT_FLOAT
<a name="line-9494"></a>      type: DT_DOUBLE
<a name="line-9495"></a>      type: DT_INT64
<a name="line-9496"></a>      type: DT_INT32
<a name="line-9497"></a>      type: DT_UINT8
<a name="line-9498"></a>      type: DT_UINT16
<a name="line-9499"></a>      type: DT_INT16
<a name="line-9500"></a>      type: DT_INT8
<a name="line-9501"></a>      type: DT_COMPLEX64
<a name="line-9502"></a>      type: DT_COMPLEX128
<a name="line-9503"></a>      type: DT_QINT8
<a name="line-9504"></a>      type: DT_QUINT8
<a name="line-9505"></a>      type: DT_QINT32
<a name="line-9506"></a>      type: DT_HALF
<a name="line-9507"></a>    }
<a name="line-9508"></a>  }
<a name="line-9509"></a>  name: "T"
<a name="line-9510"></a>  type: "type"
<a name="line-9511"></a>}
<a name="line-9512"></a>attr {
<a name="line-9513"></a>  allowed_values { list { s: "NHWC" s: "NCHW" } }
<a name="line-9514"></a>  default_value { s: "NHWC" }
<a name="line-9515"></a>  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n    dimension."
<a name="line-9516"></a>  name: "data_format"
<a name="line-9517"></a>  type: "string"
<a name="line-9518"></a>}
<a name="line-9519"></a>input_arg {
<a name="line-9520"></a>  description: "Any number of dimensions."
<a name="line-9521"></a>  name: "out_backprop"
<a name="line-9522"></a>  type_attr: "T"
<a name="line-9523"></a>}
<a name="line-9524"></a>output_arg {
<a name="line-9525"></a>  description: "1-D with size the feature dimension of `out_backprop`."
<a name="line-9526"></a>  name: "output"
<a name="line-9527"></a>  type_attr: "T"
<a name="line-9528"></a>}
<a name="line-9529"></a>-}</span>
<a name="line-9530"></a>
<a name="line-9531"></a><a name="addSparseToTensorsMap"></a><span class='hs-comment'>-- | Add a `SparseTensor` to a `SparseTensorsMap` return its handle.</span>
<a name="line-9532"></a><span class='hs-comment'>--</span>
<a name="line-9533"></a><span class='hs-comment'>-- A `SparseTensor` is represented by three tensors: `sparse_indices`,</span>
<a name="line-9534"></a><span class='hs-comment'>-- `sparse_values`, and `sparse_shape`.</span>
<a name="line-9535"></a><span class='hs-comment'>-- </span>
<a name="line-9536"></a><span class='hs-comment'>-- This operator takes the given `SparseTensor` and adds it to a container</span>
<a name="line-9537"></a><span class='hs-comment'>-- object (a `SparseTensorsMap`).  A unique key within this container is generated</span>
<a name="line-9538"></a><span class='hs-comment'>-- in the form of an `int64`, and this is the value that is returned.</span>
<a name="line-9539"></a><span class='hs-comment'>-- </span>
<a name="line-9540"></a><span class='hs-comment'>-- The `SparseTensor` can then be read out as part of a minibatch by passing</span>
<a name="line-9541"></a><span class='hs-comment'>-- the key as a vector element to `TakeManySparseFromTensorsMap`.  To ensure</span>
<a name="line-9542"></a><span class='hs-comment'>-- the correct `SparseTensorsMap` is accessed, ensure that the same</span>
<a name="line-9543"></a><span class='hs-comment'>-- `container` and `shared_name` are passed to that Op.  If no `shared_name`</span>
<a name="line-9544"></a><span class='hs-comment'>-- is provided here, instead use the *name* of the Operation created by calling</span>
<a name="line-9545"></a><span class='hs-comment'>-- `AddSparseToTensorsMap` as the `shared_name` passed to</span>
<a name="line-9546"></a><span class='hs-comment'>-- `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.</span>
<a name="line-9547"></a><span class='hs-definition'>addSparseToTensorsMap</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9548"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sparse_indices__: 2-D.  The `indices` of the `SparseTensor`.</span>
<a name="line-9549"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __sparse_values__: 1-D.  The `values` of the `SparseTensor`.</span>
<a name="line-9550"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sparse_shape__: 1-D.  The `shape` of the `SparseTensor`.</span>
<a name="line-9551"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __sparse_handle__: 0-D.  The handle of the `SparseTensor` now stored in the</span>
<a name="line-9552"></a>                         <span class='hs-comment'>-- `SparseTensorsMap`.</span>
<a name="line-9553"></a><span class='hs-definition'>addSparseToTensorsMap</span> <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>sparse_values</span>
<a name="line-9554"></a>                      <span class='hs-varid'>sparse_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9555"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AddSparseToTensorsMap"</span>
<a name="line-9556"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9557"></a>        <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>sparse_values</span> <span class='hs-varid'>sparse_shape</span>
<a name="line-9558"></a><span class='hs-comment'>{-
<a name="line-9559"></a>attr { name: "T" type: "type" }
<a name="line-9560"></a>attr {
<a name="line-9561"></a>  default_value { s: "" }
<a name="line-9562"></a>  description: "The container name for the `SparseTensorsMap` created by this op."
<a name="line-9563"></a>  name: "container"
<a name="line-9564"></a>  type: "string"
<a name="line-9565"></a>}
<a name="line-9566"></a>attr {
<a name="line-9567"></a>  default_value { s: "" }
<a name="line-9568"></a>  description: "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation\'s unique name is used."
<a name="line-9569"></a>  name: "shared_name"
<a name="line-9570"></a>  type: "string"
<a name="line-9571"></a>}
<a name="line-9572"></a>input_arg {
<a name="line-9573"></a>  description: "2-D.  The `indices` of the `SparseTensor`."
<a name="line-9574"></a>  name: "sparse_indices"
<a name="line-9575"></a>  type: DT_INT64
<a name="line-9576"></a>}
<a name="line-9577"></a>input_arg {
<a name="line-9578"></a>  description: "1-D.  The `values` of the `SparseTensor`."
<a name="line-9579"></a>  name: "sparse_values"
<a name="line-9580"></a>  type_attr: "T"
<a name="line-9581"></a>}
<a name="line-9582"></a>input_arg {
<a name="line-9583"></a>  description: "1-D.  The `shape` of the `SparseTensor`."
<a name="line-9584"></a>  name: "sparse_shape"
<a name="line-9585"></a>  type: DT_INT64
<a name="line-9586"></a>}
<a name="line-9587"></a>output_arg {
<a name="line-9588"></a>  description: "0-D.  The handle of the `SparseTensor` now stored in the\n`SparseTensorsMap`."
<a name="line-9589"></a>  name: "sparse_handle"
<a name="line-9590"></a>  type: DT_INT64
<a name="line-9591"></a>}
<a name="line-9592"></a>-}</span>
<a name="line-9593"></a>
<a name="line-9594"></a><span class='hs-comment'>-- | Computes tan of x element-wise.</span>
<a name="line-9595"></a>
<a name="line-9596"></a><a name="tan"></a><span class='hs-definition'>tan</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-9597"></a>                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-9598"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-9599"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-9600"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-9601"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-9602"></a><span class='hs-definition'>tan</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9603"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Tan"</span>
<a name="line-9604"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9605"></a>        <span class='hs-varid'>x</span>
<a name="line-9606"></a><span class='hs-comment'>{-
<a name="line-9607"></a>attr {
<a name="line-9608"></a>  allowed_values {
<a name="line-9609"></a>    list {
<a name="line-9610"></a>      type: DT_HALF
<a name="line-9611"></a>      type: DT_FLOAT
<a name="line-9612"></a>      type: DT_DOUBLE
<a name="line-9613"></a>      type: DT_INT32
<a name="line-9614"></a>      type: DT_INT64
<a name="line-9615"></a>      type: DT_COMPLEX64
<a name="line-9616"></a>      type: DT_COMPLEX128
<a name="line-9617"></a>    }
<a name="line-9618"></a>  }
<a name="line-9619"></a>  name: "T"
<a name="line-9620"></a>  type: "type"
<a name="line-9621"></a>}
<a name="line-9622"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-9623"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-9624"></a>-}</span>
<a name="line-9625"></a>
<a name="line-9626"></a><a name="sparseReduceSumSparse"></a><span class='hs-comment'>-- | Computes the sum of elements across dimensions of a SparseTensor.</span>
<a name="line-9627"></a><span class='hs-comment'>--</span>
<a name="line-9628"></a><span class='hs-comment'>-- This Op takes a SparseTensor and is the sparse counterpart to</span>
<a name="line-9629"></a><span class='hs-comment'>-- `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a</span>
<a name="line-9630"></a><span class='hs-comment'>-- SparseTensor.</span>
<a name="line-9631"></a><span class='hs-comment'>-- </span>
<a name="line-9632"></a><span class='hs-comment'>-- Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless</span>
<a name="line-9633"></a><span class='hs-comment'>-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in</span>
<a name="line-9634"></a><span class='hs-comment'>-- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained</span>
<a name="line-9635"></a><span class='hs-comment'>-- with length 1.</span>
<a name="line-9636"></a><span class='hs-comment'>-- </span>
<a name="line-9637"></a><span class='hs-comment'>-- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor</span>
<a name="line-9638"></a><span class='hs-comment'>-- with a single element is returned.  Additionally, the axes can be negative,</span>
<a name="line-9639"></a><span class='hs-comment'>-- which are interpreted according to the indexing rules in Python.</span>
<a name="line-9640"></a><span class='hs-definition'>sparseReduceSumSparse</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-9641"></a>                                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-9642"></a>                                                         <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-9643"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-9644"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9645"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-9646"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-9647"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-9648"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-9649"></a>                                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9650"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a</span>
<a name="line-9651"></a>                                                  <span class='hs-comment'>-- SparseTensor, possibly not in canonical ordering.</span>
<a name="line-9652"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.</span>
<a name="line-9653"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.</span>
<a name="line-9654"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.</span>
<a name="line-9655"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-9656"></a>                             <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span>
<a name="line-9657"></a>                         <span class='hs-comment'>-- ^ (__output_indices__, __output_values__, __output_shape__)</span>
<a name="line-9658"></a>                         <span class='hs-comment'>--</span>
<a name="line-9659"></a>                         <span class='hs-comment'>-- * __output_indices__</span>
<a name="line-9660"></a>                         <span class='hs-comment'>--</span>
<a name="line-9661"></a>                         <span class='hs-comment'>-- * __output_values__</span>
<a name="line-9662"></a>                         <span class='hs-comment'>--</span>
<a name="line-9663"></a>                         <span class='hs-comment'>-- * __output_shape__</span>
<a name="line-9664"></a><span class='hs-definition'>sparseReduceSumSparse</span> <span class='hs-varid'>input_indices</span> <span class='hs-varid'>input_values</span> <span class='hs-varid'>input_shape</span>
<a name="line-9665"></a>                      <span class='hs-varid'>reduction_axes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9666"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseReduceSumSparse"</span>
<a name="line-9667"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9668"></a>        <span class='hs-varid'>input_indices</span> <span class='hs-varid'>input_values</span> <span class='hs-varid'>input_shape</span> <span class='hs-varid'>reduction_axes</span>
<a name="line-9669"></a><span class='hs-comment'>{-
<a name="line-9670"></a>attr {
<a name="line-9671"></a>  default_value { b: false }
<a name="line-9672"></a>  description: "If true, retain reduced dimensions with length 1."
<a name="line-9673"></a>  name: "keep_dims"
<a name="line-9674"></a>  type: "bool"
<a name="line-9675"></a>}
<a name="line-9676"></a>attr {
<a name="line-9677"></a>  allowed_values {
<a name="line-9678"></a>    list {
<a name="line-9679"></a>      type: DT_FLOAT
<a name="line-9680"></a>      type: DT_DOUBLE
<a name="line-9681"></a>      type: DT_INT64
<a name="line-9682"></a>      type: DT_INT32
<a name="line-9683"></a>      type: DT_UINT8
<a name="line-9684"></a>      type: DT_UINT16
<a name="line-9685"></a>      type: DT_INT16
<a name="line-9686"></a>      type: DT_INT8
<a name="line-9687"></a>      type: DT_COMPLEX64
<a name="line-9688"></a>      type: DT_COMPLEX128
<a name="line-9689"></a>      type: DT_QINT8
<a name="line-9690"></a>      type: DT_QUINT8
<a name="line-9691"></a>      type: DT_QINT32
<a name="line-9692"></a>      type: DT_HALF
<a name="line-9693"></a>    }
<a name="line-9694"></a>  }
<a name="line-9695"></a>  name: "T"
<a name="line-9696"></a>  type: "type"
<a name="line-9697"></a>}
<a name="line-9698"></a>input_arg {
<a name="line-9699"></a>  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
<a name="line-9700"></a>  name: "input_indices"
<a name="line-9701"></a>  type: DT_INT64
<a name="line-9702"></a>}
<a name="line-9703"></a>input_arg {
<a name="line-9704"></a>  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
<a name="line-9705"></a>  name: "input_values"
<a name="line-9706"></a>  type_attr: "T"
<a name="line-9707"></a>}
<a name="line-9708"></a>input_arg {
<a name="line-9709"></a>  description: "1-D.  Shape of the input SparseTensor."
<a name="line-9710"></a>  name: "input_shape"
<a name="line-9711"></a>  type: DT_INT64
<a name="line-9712"></a>}
<a name="line-9713"></a>input_arg {
<a name="line-9714"></a>  description: "1-D.  Length-`K` vector containing the reduction axes."
<a name="line-9715"></a>  name: "reduction_axes"
<a name="line-9716"></a>  type: DT_INT32
<a name="line-9717"></a>}
<a name="line-9718"></a>output_arg { name: "output_indices" type: DT_INT64 }
<a name="line-9719"></a>output_arg { name: "output_values" type_attr: "T" }
<a name="line-9720"></a>output_arg { name: "output_shape" type: DT_INT64 }
<a name="line-9721"></a>-}</span>
<a name="line-9722"></a>
<a name="line-9723"></a><a name="shapeN"></a><span class='hs-comment'>-- | Returns shape of tensors.</span>
<a name="line-9724"></a><span class='hs-comment'>--</span>
<a name="line-9725"></a><span class='hs-comment'>-- This operation returns N 1-D integer tensors representing shape of `input[i]s`.</span>
<a name="line-9726"></a><span class='hs-definition'>shapeN</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-9727"></a>                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9728"></a>                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9729"></a>          <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-9730"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-9731"></a><span class='hs-definition'>shapeN</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"input"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>input</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9732"></a>    <span class='hs-varid'>buildListOp</span> <span class='hs-keyglyph'>[</span><span class='hs-varid'>n</span><span class='hs-keyglyph'>]</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ShapeN"</span>
<a name="line-9733"></a>                     <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-9734"></a>                     <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span>
<a name="line-9735"></a>                     <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-9736"></a>        <span class='hs-varid'>input</span>
<a name="line-9737"></a>  <span class='hs-keyword'>where</span>
<a name="line-9738"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>input</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-9739"></a><span class='hs-comment'>{-
<a name="line-9740"></a>attr { has_minimum: true minimum: 1 name: "N" type: "int" }
<a name="line-9741"></a>attr { name: "T" type: "type" }
<a name="line-9742"></a>attr {
<a name="line-9743"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-9744"></a>  default_value { type: DT_INT32 }
<a name="line-9745"></a>  name: "out_type"
<a name="line-9746"></a>  type: "type"
<a name="line-9747"></a>}
<a name="line-9748"></a>input_arg { name: "input" number_attr: "N" type_attr: "T" }
<a name="line-9749"></a>output_arg {
<a name="line-9750"></a>  name: "output" number_attr: "N" type_attr: "out_type"
<a name="line-9751"></a>}
<a name="line-9752"></a>-}</span>
<a name="line-9753"></a>
<a name="line-9754"></a><a name="shape"></a><span class='hs-comment'>-- | Returns the shape of a tensor.</span>
<a name="line-9755"></a><span class='hs-comment'>--</span>
<a name="line-9756"></a><span class='hs-comment'>-- This operation returns a 1-D integer tensor representing the shape of `input`.</span>
<a name="line-9757"></a><span class='hs-comment'>-- </span>
<a name="line-9758"></a><span class='hs-comment'>-- For example:</span>
<a name="line-9759"></a><span class='hs-comment'>-- </span>
<a name="line-9760"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-9761"></a><span class='hs-comment'>-- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]</span>
<a name="line-9762"></a><span class='hs-comment'>-- shape(t) ==&gt; [2, 2, 3]</span>
<a name="line-9763"></a><span class='hs-comment'>-- ```</span>
<a name="line-9764"></a><span class='hs-definition'>shape</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-9765"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9766"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9767"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-9768"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-9769"></a><span class='hs-definition'>shape</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9770"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Shape"</span>
<a name="line-9771"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-9772"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9773"></a>        <span class='hs-varid'>input</span>
<a name="line-9774"></a><span class='hs-comment'>{-
<a name="line-9775"></a>attr { name: "T" type: "type" }
<a name="line-9776"></a>attr {
<a name="line-9777"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-9778"></a>  default_value { type: DT_INT32 }
<a name="line-9779"></a>  name: "out_type"
<a name="line-9780"></a>  type: "type"
<a name="line-9781"></a>}
<a name="line-9782"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-9783"></a>output_arg { name: "output" type_attr: "out_type" }
<a name="line-9784"></a>-}</span>
<a name="line-9785"></a>
<a name="line-9786"></a><a name="unique"></a><span class='hs-comment'>-- | Finds unique elements in a 1-D tensor.</span>
<a name="line-9787"></a><span class='hs-comment'>--</span>
<a name="line-9788"></a><span class='hs-comment'>-- This operation returns a tensor `y` containing all of the unique elements of `x`</span>
<a name="line-9789"></a><span class='hs-comment'>-- sorted in the same order that they occur in `x`. This operation also returns a</span>
<a name="line-9790"></a><span class='hs-comment'>-- tensor `idx` the same size as `x` that contains the index of each value of `x`</span>
<a name="line-9791"></a><span class='hs-comment'>-- in the unique output `y`. In other words:</span>
<a name="line-9792"></a><span class='hs-comment'>-- </span>
<a name="line-9793"></a><span class='hs-comment'>-- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`</span>
<a name="line-9794"></a><span class='hs-comment'>-- </span>
<a name="line-9795"></a><span class='hs-comment'>-- For example:</span>
<a name="line-9796"></a><span class='hs-comment'>-- </span>
<a name="line-9797"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-9798"></a><span class='hs-comment'>-- # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]</span>
<a name="line-9799"></a><span class='hs-comment'>-- y, idx = unique(x)</span>
<a name="line-9800"></a><span class='hs-comment'>-- y ==&gt; [1, 2, 4, 7, 8]</span>
<a name="line-9801"></a><span class='hs-comment'>-- idx ==&gt; [0, 0, 1, 2, 2, 2, 3, 4, 4]</span>
<a name="line-9802"></a><span class='hs-comment'>-- ```</span>
<a name="line-9803"></a><span class='hs-definition'>unique</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varid'>out_idx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>,</span>
<a name="line-9804"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9805"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9806"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__: 1-D.</span>
<a name="line-9807"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ (__y__, __idx__)</span>
<a name="line-9808"></a>          <span class='hs-comment'>--</span>
<a name="line-9809"></a>          <span class='hs-comment'>-- * __y__: 1-D.</span>
<a name="line-9810"></a>          <span class='hs-comment'>--</span>
<a name="line-9811"></a>          <span class='hs-comment'>-- * __idx__: 1-D.</span>
<a name="line-9812"></a><span class='hs-definition'>unique</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9813"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Unique"</span>
<a name="line-9814"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-9815"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_idx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9816"></a>        <span class='hs-varid'>x</span>
<a name="line-9817"></a><span class='hs-comment'>{-
<a name="line-9818"></a>attr { name: "T" type: "type" }
<a name="line-9819"></a>attr {
<a name="line-9820"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-9821"></a>  default_value { type: DT_INT32 }
<a name="line-9822"></a>  name: "out_idx"
<a name="line-9823"></a>  type: "type"
<a name="line-9824"></a>}
<a name="line-9825"></a>input_arg { description: "1-D." name: "x" type_attr: "T" }
<a name="line-9826"></a>output_arg { description: "1-D." name: "y" type_attr: "T" }
<a name="line-9827"></a>output_arg { description: "1-D." name: "idx" type_attr: "out_idx" }
<a name="line-9828"></a>-}</span>
<a name="line-9829"></a>
<a name="line-9830"></a><a name="truncatedNormal"></a><span class='hs-comment'>-- | Outputs random values from a truncated normal distribution.</span>
<a name="line-9831"></a><span class='hs-comment'>--</span>
<a name="line-9832"></a><span class='hs-comment'>-- The generated values follow a normal distribution with mean 0 and standard</span>
<a name="line-9833"></a><span class='hs-comment'>-- deviation 1, except that values whose magnitude is more than 2 standard</span>
<a name="line-9834"></a><span class='hs-comment'>-- deviations from the mean are dropped and re-picked.</span>
<a name="line-9835"></a><span class='hs-definition'>truncatedNormal</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>dtype</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-9836"></a>                                        <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-9837"></a>                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-9838"></a>                                        <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9839"></a>                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9840"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __shape__: The shape of the output tensor.</span>
<a name="line-9841"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A tensor of the specified shape filled with random truncated normal</span>
<a name="line-9842"></a>                   <span class='hs-comment'>-- values.</span>
<a name="line-9843"></a><span class='hs-definition'>truncatedNormal</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9844"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TruncatedNormal"</span>
<a name="line-9845"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-9846"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9847"></a>        <span class='hs-varid'>shape</span>
<a name="line-9848"></a><span class='hs-comment'>{-
<a name="line-9849"></a>attr {
<a name="line-9850"></a>  default_value { i: 0 }
<a name="line-9851"></a>  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-9852"></a>  name: "seed"
<a name="line-9853"></a>  type: "int"
<a name="line-9854"></a>}
<a name="line-9855"></a>attr {
<a name="line-9856"></a>  default_value { i: 0 }
<a name="line-9857"></a>  description: "A second seed to avoid seed collision."
<a name="line-9858"></a>  name: "seed2"
<a name="line-9859"></a>  type: "int"
<a name="line-9860"></a>}
<a name="line-9861"></a>attr {
<a name="line-9862"></a>  allowed_values {
<a name="line-9863"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-9864"></a>  }
<a name="line-9865"></a>  description: "The type of the output."
<a name="line-9866"></a>  name: "dtype"
<a name="line-9867"></a>  type: "type"
<a name="line-9868"></a>}
<a name="line-9869"></a>attr {
<a name="line-9870"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-9871"></a>  name: "T"
<a name="line-9872"></a>  type: "type"
<a name="line-9873"></a>}
<a name="line-9874"></a>input_arg {
<a name="line-9875"></a>  description: "The shape of the output tensor."
<a name="line-9876"></a>  name: "shape"
<a name="line-9877"></a>  type_attr: "T"
<a name="line-9878"></a>}
<a name="line-9879"></a>output_arg {
<a name="line-9880"></a>  description: "A tensor of the specified shape filled with random truncated normal\nvalues."
<a name="line-9881"></a>  name: "output"
<a name="line-9882"></a>  type_attr: "dtype"
<a name="line-9883"></a>}
<a name="line-9884"></a>-}</span>
<a name="line-9885"></a>
<a name="line-9886"></a><a name="invertPermutation"></a><span class='hs-comment'>-- | Computes the inverse permutation of a tensor.</span>
<a name="line-9887"></a><span class='hs-comment'>--</span>
<a name="line-9888"></a><span class='hs-comment'>-- This operation computes the inverse of an index permutation. It takes a 1-D</span>
<a name="line-9889"></a><span class='hs-comment'>-- integer tensor `x`, which represents the indices of a zero-based array, and</span>
<a name="line-9890"></a><span class='hs-comment'>-- swaps each value with its index position. In other words, for an output tensor</span>
<a name="line-9891"></a><span class='hs-comment'>-- `y` and an input tensor `x`, this operation computes the following:</span>
<a name="line-9892"></a><span class='hs-comment'>-- </span>
<a name="line-9893"></a><span class='hs-comment'>-- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`</span>
<a name="line-9894"></a><span class='hs-comment'>-- </span>
<a name="line-9895"></a><span class='hs-comment'>-- The values must include 0. There can be no duplicate values or negative values.</span>
<a name="line-9896"></a><span class='hs-comment'>-- </span>
<a name="line-9897"></a><span class='hs-comment'>-- For example:</span>
<a name="line-9898"></a><span class='hs-comment'>-- </span>
<a name="line-9899"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-9900"></a><span class='hs-comment'>-- # tensor `x` is [3, 4, 0, 2, 1]</span>
<a name="line-9901"></a><span class='hs-comment'>-- invert_permutation(x) ==&gt; [2, 4, 3, 0, 1]</span>
<a name="line-9902"></a><span class='hs-comment'>-- ```</span>
<a name="line-9903"></a><span class='hs-definition'>invertPermutation</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-9904"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9905"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__: 1-D.</span>
<a name="line-9906"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__: 1-D.</span>
<a name="line-9907"></a><span class='hs-definition'>invertPermutation</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9908"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"InvertPermutation"</span>
<a name="line-9909"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9910"></a>        <span class='hs-varid'>x</span>
<a name="line-9911"></a><span class='hs-comment'>{-
<a name="line-9912"></a>attr {
<a name="line-9913"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-9914"></a>  default_value { type: DT_INT32 }
<a name="line-9915"></a>  name: "T"
<a name="line-9916"></a>  type: "type"
<a name="line-9917"></a>}
<a name="line-9918"></a>input_arg { description: "1-D." name: "x" type_attr: "T" }
<a name="line-9919"></a>output_arg { description: "1-D." name: "y" type_attr: "T" }
<a name="line-9920"></a>-}</span>
<a name="line-9921"></a>
<a name="line-9922"></a><a name="checkNumerics"></a><span class='hs-comment'>-- | Checks a tensor for NaN and Inf values.</span>
<a name="line-9923"></a><span class='hs-comment'>--</span>
<a name="line-9924"></a><span class='hs-comment'>-- When run, reports an `InvalidArgument` error if `tensor` has any values</span>
<a name="line-9925"></a><span class='hs-comment'>-- that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.</span>
<a name="line-9926"></a><span class='hs-definition'>checkNumerics</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-9927"></a>                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-9928"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __tensor__</span>
<a name="line-9929"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-9930"></a><span class='hs-definition'>checkNumerics</span> <span class='hs-varid'>tensor</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9931"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CheckNumerics"</span>
<a name="line-9932"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-9933"></a>        <span class='hs-varid'>tensor</span>
<a name="line-9934"></a><span class='hs-comment'>{-
<a name="line-9935"></a>attr {
<a name="line-9936"></a>  allowed_values {
<a name="line-9937"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-9938"></a>  }
<a name="line-9939"></a>  name: "T"
<a name="line-9940"></a>  type: "type"
<a name="line-9941"></a>}
<a name="line-9942"></a>attr {
<a name="line-9943"></a>  description: "Prefix of the error message."
<a name="line-9944"></a>  name: "message"
<a name="line-9945"></a>  type: "string"
<a name="line-9946"></a>}
<a name="line-9947"></a>input_arg { name: "tensor" type_attr: "T" }
<a name="line-9948"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-9949"></a>-}</span>
<a name="line-9950"></a>
<a name="line-9951"></a><a name="uniformCandidateSampler"></a><span class='hs-comment'>-- | Generates labels for candidate sampling with a uniform distribution.</span>
<a name="line-9952"></a><span class='hs-comment'>--</span>
<a name="line-9953"></a><span class='hs-comment'>-- See explanations of candidate sampling and the data formats at</span>
<a name="line-9954"></a><span class='hs-comment'>-- go/candidate-sampling.</span>
<a name="line-9955"></a><span class='hs-comment'>-- </span>
<a name="line-9956"></a><span class='hs-comment'>-- For each batch, this op picks a single set of sampled candidate labels.</span>
<a name="line-9957"></a><span class='hs-comment'>-- </span>
<a name="line-9958"></a><span class='hs-comment'>-- The advantages of sampling candidates per-batch are simplicity and the</span>
<a name="line-9959"></a><span class='hs-comment'>-- possibility of efficient dense matrix multiplication. The disadvantage is that</span>
<a name="line-9960"></a><span class='hs-comment'>-- the sampled candidates must be chosen independently of the context and of the</span>
<a name="line-9961"></a><span class='hs-comment'>-- true labels.</span>
<a name="line-9962"></a><span class='hs-definition'>uniformCandidateSampler</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_sampled__: Number of candidates to randomly sample per batch.</span>
<a name="line-9963"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_true__: Number of true labels per context.</span>
<a name="line-9964"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).</span>
<a name="line-9965"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __unique__: If unique is true, we sample with rejection, so that all sampled</span>
<a name="line-9966"></a>                                   <span class='hs-comment'>-- candidates in a batch are unique. This requires some approximation to</span>
<a name="line-9967"></a>                                   <span class='hs-comment'>-- estimate the post-rejection sampling probabilities.</span>
<a name="line-9968"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the</span>
<a name="line-9969"></a>                                                       <span class='hs-comment'>-- IDs of the num_true target_classes in the corresponding original label.</span>
<a name="line-9970"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-9971"></a>                               <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-9972"></a>                           <span class='hs-comment'>-- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)</span>
<a name="line-9973"></a>                           <span class='hs-comment'>--</span>
<a name="line-9974"></a>                           <span class='hs-comment'>-- * __sampled_candidates__: A vector of length num_sampled, in which each element is</span>
<a name="line-9975"></a>                           <span class='hs-comment'>-- the ID of a sampled candidate.</span>
<a name="line-9976"></a>                           <span class='hs-comment'>--</span>
<a name="line-9977"></a>                           <span class='hs-comment'>-- * __true_expected_count__: A batch_size * num_true matrix, representing</span>
<a name="line-9978"></a>                           <span class='hs-comment'>-- the number of times each candidate is expected to occur in a batch</span>
<a name="line-9979"></a>                           <span class='hs-comment'>-- of sampled candidates. If unique=true, then this is a probability.</span>
<a name="line-9980"></a>                           <span class='hs-comment'>--</span>
<a name="line-9981"></a>                           <span class='hs-comment'>-- * __sampled_expected_count__: A vector of length num_sampled, for each sampled</span>
<a name="line-9982"></a>                           <span class='hs-comment'>-- candidate representing the number of times the candidate is expected</span>
<a name="line-9983"></a>                           <span class='hs-comment'>-- to occur in a batch of sampled candidates.  If unique=true, then this is a</span>
<a name="line-9984"></a>                           <span class='hs-comment'>-- probability.</span>
<a name="line-9985"></a><span class='hs-definition'>uniformCandidateSampler</span> <span class='hs-varid'>num_sampled</span> <span class='hs-varid'>num_true</span> <span class='hs-varid'>range_max</span> <span class='hs-varid'>unique</span>
<a name="line-9986"></a>                        <span class='hs-varid'>true_classes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-9987"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"UniformCandidateSampler"</span>
<a name="line-9988"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_sampled"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_sampled</span>
<a name="line-9989"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_true"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_true</span>
<a name="line-9990"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"range_max"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>range_max</span>
<a name="line-9991"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"unique"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>unique</span><span class='hs-layout'>)</span>
<a name="line-9992"></a>        <span class='hs-varid'>true_classes</span>
<a name="line-9993"></a><span class='hs-comment'>{-
<a name="line-9994"></a>attr {
<a name="line-9995"></a>  description: "Number of true labels per context."
<a name="line-9996"></a>  has_minimum: true
<a name="line-9997"></a>  minimum: 1
<a name="line-9998"></a>  name: "num_true"
<a name="line-9999"></a>  type: "int"
<a name="line-10000"></a>}
<a name="line-10001"></a>attr {
<a name="line-10002"></a>  description: "Number of candidates to randomly sample per batch."
<a name="line-10003"></a>  has_minimum: true
<a name="line-10004"></a>  minimum: 1
<a name="line-10005"></a>  name: "num_sampled"
<a name="line-10006"></a>  type: "int"
<a name="line-10007"></a>}
<a name="line-10008"></a>attr {
<a name="line-10009"></a>  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
<a name="line-10010"></a>  name: "unique"
<a name="line-10011"></a>  type: "bool"
<a name="line-10012"></a>}
<a name="line-10013"></a>attr {
<a name="line-10014"></a>  description: "The sampler will sample integers from the interval [0, range_max)."
<a name="line-10015"></a>  has_minimum: true
<a name="line-10016"></a>  minimum: 1
<a name="line-10017"></a>  name: "range_max"
<a name="line-10018"></a>  type: "int"
<a name="line-10019"></a>}
<a name="line-10020"></a>attr {
<a name="line-10021"></a>  default_value { i: 0 }
<a name="line-10022"></a>  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-10023"></a>  name: "seed"
<a name="line-10024"></a>  type: "int"
<a name="line-10025"></a>}
<a name="line-10026"></a>attr {
<a name="line-10027"></a>  default_value { i: 0 }
<a name="line-10028"></a>  description: "An second seed to avoid seed collision."
<a name="line-10029"></a>  name: "seed2"
<a name="line-10030"></a>  type: "int"
<a name="line-10031"></a>}
<a name="line-10032"></a>input_arg {
<a name="line-10033"></a>  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
<a name="line-10034"></a>  name: "true_classes"
<a name="line-10035"></a>  type: DT_INT64
<a name="line-10036"></a>}
<a name="line-10037"></a>output_arg {
<a name="line-10038"></a>  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
<a name="line-10039"></a>  name: "sampled_candidates"
<a name="line-10040"></a>  type: DT_INT64
<a name="line-10041"></a>}
<a name="line-10042"></a>output_arg {
<a name="line-10043"></a>  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
<a name="line-10044"></a>  name: "true_expected_count"
<a name="line-10045"></a>  type: DT_FLOAT
<a name="line-10046"></a>}
<a name="line-10047"></a>output_arg {
<a name="line-10048"></a>  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
<a name="line-10049"></a>  name: "sampled_expected_count"
<a name="line-10050"></a>  type: DT_FLOAT
<a name="line-10051"></a>}
<a name="line-10052"></a>-}</span>
<a name="line-10053"></a>
<a name="line-10054"></a><a name="gather"></a><span class='hs-comment'>-- | Gather slices from `params` according to `indices`.</span>
<a name="line-10055"></a><span class='hs-comment'>--</span>
<a name="line-10056"></a><span class='hs-comment'>-- `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).</span>
<a name="line-10057"></a><span class='hs-comment'>-- Produces an output tensor with shape `indices.shape + params.shape[1:]` where:</span>
<a name="line-10058"></a><span class='hs-comment'>-- </span>
<a name="line-10059"></a><span class='hs-comment'>-- ```python</span>
<a name="line-10060"></a><span class='hs-comment'>--     # Scalar indices</span>
<a name="line-10061"></a><span class='hs-comment'>--     output[:, ..., :] = params[indices, :, ... :]</span>
<a name="line-10062"></a><span class='hs-comment'>-- </span>
<a name="line-10063"></a><span class='hs-comment'>--     # Vector indices</span>
<a name="line-10064"></a><span class='hs-comment'>--     output[i, :, ..., :] = params[indices[i], :, ... :]</span>
<a name="line-10065"></a><span class='hs-comment'>-- </span>
<a name="line-10066"></a><span class='hs-comment'>--     # Higher rank indices</span>
<a name="line-10067"></a><span class='hs-comment'>--     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]</span>
<a name="line-10068"></a><span class='hs-comment'>-- ```</span>
<a name="line-10069"></a><span class='hs-comment'>-- </span>
<a name="line-10070"></a><span class='hs-comment'>-- If `indices` is a permutation and `len(indices) == params.shape[0]` then</span>
<a name="line-10071"></a><span class='hs-comment'>-- this operation will permute `params` accordingly.</span>
<a name="line-10072"></a><span class='hs-comment'>-- </span>
<a name="line-10073"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-10074"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/Gather.png" alt&gt;</span>
<a name="line-10075"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-10076"></a><span class='hs-definition'>gather</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tparams</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tparams</span><span class='hs-layout'>,</span>
<a name="line-10077"></a>                                           <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-10078"></a>                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-10079"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10080"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tparams</span> <span class='hs-comment'>-- ^ __params__</span>
<a name="line-10081"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__</span>
<a name="line-10082"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tparams</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-10083"></a><span class='hs-definition'>gather</span> <span class='hs-varid'>params</span> <span class='hs-varid'>indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10084"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Gather"</span>
<a name="line-10085"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tparams"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tparams</span><span class='hs-layout'>)</span>
<a name="line-10086"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10087"></a>        <span class='hs-varid'>params</span> <span class='hs-varid'>indices</span>
<a name="line-10088"></a><span class='hs-comment'>{-
<a name="line-10089"></a>attr {
<a name="line-10090"></a>  default_value { b: true } name: "validate_indices" type: "bool"
<a name="line-10091"></a>}
<a name="line-10092"></a>attr { name: "Tparams" type: "type" }
<a name="line-10093"></a>attr {
<a name="line-10094"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-10095"></a>  name: "Tindices"
<a name="line-10096"></a>  type: "type"
<a name="line-10097"></a>}
<a name="line-10098"></a>input_arg { name: "params" type_attr: "Tparams" }
<a name="line-10099"></a>input_arg { name: "indices" type_attr: "Tindices" }
<a name="line-10100"></a>output_arg { name: "output" type_attr: "Tparams" }
<a name="line-10101"></a>-}</span>
<a name="line-10102"></a>
<a name="line-10103"></a><span class='hs-comment'>-- | Returns a constant tensor.</span>
<a name="line-10104"></a>
<a name="line-10105"></a><a name="const"></a><span class='hs-definition'>const</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-10106"></a><span class='hs-definition'>const</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10107"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Const"</span>
<a name="line-10108"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10109"></a>        
<a name="line-10110"></a><span class='hs-comment'>{-
<a name="line-10111"></a>attr {
<a name="line-10112"></a>  description: "Attr `value` is the tensor to return."
<a name="line-10113"></a>  name: "value"
<a name="line-10114"></a>  type: "tensor"
<a name="line-10115"></a>}
<a name="line-10116"></a>attr { name: "dtype" type: "type" }
<a name="line-10117"></a>output_arg { name: "output" type_attr: "dtype" }
<a name="line-10118"></a>-}</span>
<a name="line-10119"></a>
<a name="line-10120"></a><a name="fill"></a><span class='hs-comment'>-- | Creates a tensor filled with a scalar value.</span>
<a name="line-10121"></a><span class='hs-comment'>--</span>
<a name="line-10122"></a><span class='hs-comment'>-- This operation creates a tensor of shape `dims` and fills it with `value`.</span>
<a name="line-10123"></a><span class='hs-comment'>-- </span>
<a name="line-10124"></a><span class='hs-comment'>-- For example:</span>
<a name="line-10125"></a><span class='hs-comment'>-- </span>
<a name="line-10126"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-10127"></a><span class='hs-comment'>-- # Output tensor has shape [2, 3].</span>
<a name="line-10128"></a><span class='hs-comment'>-- fill([2, 3], 9) ==&gt; [[9, 9, 9]</span>
<a name="line-10129"></a><span class='hs-comment'>--                      [9, 9, 9]]</span>
<a name="line-10130"></a><span class='hs-comment'>-- ```</span>
<a name="line-10131"></a><span class='hs-definition'>fill</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10132"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __dims__: 1-D. Represents the shape of the output tensor.</span>
<a name="line-10133"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: 0-D (scalar). Value to fill the returned tensor.</span>
<a name="line-10134"></a>                       <span class='hs-comment'>-- </span>
<a name="line-10135"></a>                       <span class='hs-comment'>-- @compatibility(numpy)</span>
<a name="line-10136"></a>                       <span class='hs-comment'>-- Equivalent to np.full</span>
<a name="line-10137"></a>                       <span class='hs-comment'>-- @end_compatibility</span>
<a name="line-10138"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-10139"></a><span class='hs-definition'>fill</span> <span class='hs-varid'>dims</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10140"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Fill"</span>
<a name="line-10141"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10142"></a>        <span class='hs-varid'>dims</span> <span class='hs-varid'>value</span>
<a name="line-10143"></a><span class='hs-comment'>{-
<a name="line-10144"></a>attr { name: "T" type: "type" }
<a name="line-10145"></a>input_arg {
<a name="line-10146"></a>  description: "1-D. Represents the shape of the output tensor."
<a name="line-10147"></a>  name: "dims"
<a name="line-10148"></a>  type: DT_INT32
<a name="line-10149"></a>}
<a name="line-10150"></a>input_arg {
<a name="line-10151"></a>  description: "0-D (scalar). Value to fill the returned tensor.\n\n@compatibility(numpy)\nEquivalent to np.full\n@end_compatibility"
<a name="line-10152"></a>  name: "value"
<a name="line-10153"></a>  type_attr: "T"
<a name="line-10154"></a>}
<a name="line-10155"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-10156"></a>-}</span>
<a name="line-10157"></a>
<a name="line-10158"></a><a name="editDistance"></a><span class='hs-comment'>-- | Computes the (possibly normalized) Levenshtein Edit Distance.</span>
<a name="line-10159"></a><span class='hs-comment'>--</span>
<a name="line-10160"></a><span class='hs-comment'>-- The inputs are variable-length sequences provided by SparseTensors</span>
<a name="line-10161"></a><span class='hs-comment'>--   (hypothesis_indices, hypothesis_values, hypothesis_shape)</span>
<a name="line-10162"></a><span class='hs-comment'>-- and</span>
<a name="line-10163"></a><span class='hs-comment'>--   (truth_indices, truth_values, truth_shape).</span>
<a name="line-10164"></a><span class='hs-comment'>-- </span>
<a name="line-10165"></a><span class='hs-comment'>-- The inputs are:</span>
<a name="line-10166"></a><span class='hs-definition'>editDistance</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10167"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __hypothesis_indices__: The indices of the hypothesis list SparseTensor.</span>
<a name="line-10168"></a>                                         <span class='hs-comment'>-- This is an N x R int64 matrix.</span>
<a name="line-10169"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __hypothesis_values__: The values of the hypothesis list SparseTensor.</span>
<a name="line-10170"></a>                               <span class='hs-comment'>-- This is an N-length vector.</span>
<a name="line-10171"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __hypothesis_shape__: The shape of the hypothesis list SparseTensor.</span>
<a name="line-10172"></a>                                            <span class='hs-comment'>-- This is an R-length vector.</span>
<a name="line-10173"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __truth_indices__: The indices of the truth list SparseTensor.</span>
<a name="line-10174"></a>                                            <span class='hs-comment'>-- This is an M x R int64 matrix.</span>
<a name="line-10175"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __truth_values__: The values of the truth list SparseTensor.</span>
<a name="line-10176"></a>                               <span class='hs-comment'>-- This is an M-length vector.</span>
<a name="line-10177"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __truth_shape__: truth indices, vector.</span>
<a name="line-10178"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __output__: A dense float tensor with rank R - 1.</span>
<a name="line-10179"></a>                <span class='hs-comment'>-- </span>
<a name="line-10180"></a>                <span class='hs-comment'>-- For the example input:</span>
<a name="line-10181"></a>                <span class='hs-comment'>-- </span>
<a name="line-10182"></a>                <span class='hs-comment'>--     // hypothesis represents a 2x1 matrix with variable-length values:</span>
<a name="line-10183"></a>                <span class='hs-comment'>--     //   (0,0) = ["a"]</span>
<a name="line-10184"></a>                <span class='hs-comment'>--     //   (1,0) = ["b"]</span>
<a name="line-10185"></a>                <span class='hs-comment'>--     hypothesis_indices = [[0, 0, 0],</span>
<a name="line-10186"></a>                <span class='hs-comment'>--                           [1, 0, 0]]</span>
<a name="line-10187"></a>                <span class='hs-comment'>--     hypothesis_values = ["a", "b"]</span>
<a name="line-10188"></a>                <span class='hs-comment'>--     hypothesis_shape = [2, 1, 1]</span>
<a name="line-10189"></a>                <span class='hs-comment'>-- </span>
<a name="line-10190"></a>                <span class='hs-comment'>--     // truth represents a 2x2 matrix with variable-length values:</span>
<a name="line-10191"></a>                <span class='hs-comment'>--     //   (0,0) = []</span>
<a name="line-10192"></a>                <span class='hs-comment'>--     //   (0,1) = ["a"]</span>
<a name="line-10193"></a>                <span class='hs-comment'>--     //   (1,0) = ["b", "c"]</span>
<a name="line-10194"></a>                <span class='hs-comment'>--     //   (1,1) = ["a"]</span>
<a name="line-10195"></a>                <span class='hs-comment'>--     truth_indices = [[0, 1, 0],</span>
<a name="line-10196"></a>                <span class='hs-comment'>--                      [1, 0, 0],</span>
<a name="line-10197"></a>                <span class='hs-comment'>--                      [1, 0, 1],</span>
<a name="line-10198"></a>                <span class='hs-comment'>--                      [1, 1, 0]]</span>
<a name="line-10199"></a>                <span class='hs-comment'>--     truth_values = ["a", "b", "c", "a"]</span>
<a name="line-10200"></a>                <span class='hs-comment'>--     truth_shape = [2, 2, 2]</span>
<a name="line-10201"></a>                <span class='hs-comment'>--     normalize = true</span>
<a name="line-10202"></a>                <span class='hs-comment'>-- </span>
<a name="line-10203"></a>                <span class='hs-comment'>-- The output will be:</span>
<a name="line-10204"></a>                <span class='hs-comment'>-- </span>
<a name="line-10205"></a>                <span class='hs-comment'>--     // output is a 2x2 matrix with edit distances normalized by truth lengths.</span>
<a name="line-10206"></a>                <span class='hs-comment'>--     output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis</span>
<a name="line-10207"></a>                <span class='hs-comment'>--               [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis</span>
<a name="line-10208"></a><span class='hs-definition'>editDistance</span> <span class='hs-varid'>hypothesis_indices</span> <span class='hs-varid'>hypothesis_values</span> <span class='hs-varid'>hypothesis_shape</span> <span class='hs-varid'>truth_indices</span>
<a name="line-10209"></a>             <span class='hs-varid'>truth_values</span> <span class='hs-varid'>truth_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10210"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"EditDistance"</span>
<a name="line-10211"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10212"></a>        <span class='hs-varid'>hypothesis_indices</span> <span class='hs-varid'>hypothesis_values</span> <span class='hs-varid'>hypothesis_shape</span> <span class='hs-varid'>truth_indices</span>
<a name="line-10213"></a>        <span class='hs-varid'>truth_values</span> <span class='hs-varid'>truth_shape</span>
<a name="line-10214"></a><span class='hs-comment'>{-
<a name="line-10215"></a>attr {
<a name="line-10216"></a>  default_value { b: true }
<a name="line-10217"></a>  description: "boolean (if true, edit distances are normalized by length of truth).\n\nThe output is:"
<a name="line-10218"></a>  name: "normalize"
<a name="line-10219"></a>  type: "bool"
<a name="line-10220"></a>}
<a name="line-10221"></a>attr { name: "T" type: "type" }
<a name="line-10222"></a>input_arg {
<a name="line-10223"></a>  description: "The indices of the hypothesis list SparseTensor.\nThis is an N x R int64 matrix."
<a name="line-10224"></a>  name: "hypothesis_indices"
<a name="line-10225"></a>  type: DT_INT64
<a name="line-10226"></a>}
<a name="line-10227"></a>input_arg {
<a name="line-10228"></a>  description: "The values of the hypothesis list SparseTensor.\nThis is an N-length vector."
<a name="line-10229"></a>  name: "hypothesis_values"
<a name="line-10230"></a>  type_attr: "T"
<a name="line-10231"></a>}
<a name="line-10232"></a>input_arg {
<a name="line-10233"></a>  description: "The shape of the hypothesis list SparseTensor.\nThis is an R-length vector."
<a name="line-10234"></a>  name: "hypothesis_shape"
<a name="line-10235"></a>  type: DT_INT64
<a name="line-10236"></a>}
<a name="line-10237"></a>input_arg {
<a name="line-10238"></a>  description: "The indices of the truth list SparseTensor.\nThis is an M x R int64 matrix."
<a name="line-10239"></a>  name: "truth_indices"
<a name="line-10240"></a>  type: DT_INT64
<a name="line-10241"></a>}
<a name="line-10242"></a>input_arg {
<a name="line-10243"></a>  description: "The values of the truth list SparseTensor.\nThis is an M-length vector."
<a name="line-10244"></a>  name: "truth_values"
<a name="line-10245"></a>  type_attr: "T"
<a name="line-10246"></a>}
<a name="line-10247"></a>input_arg {
<a name="line-10248"></a>  description: "truth indices, vector."
<a name="line-10249"></a>  name: "truth_shape"
<a name="line-10250"></a>  type: DT_INT64
<a name="line-10251"></a>}
<a name="line-10252"></a>output_arg {
<a name="line-10253"></a>  description: "A dense float tensor with rank R - 1.\n\nFor the example input:\n\n    // hypothesis represents a 2x1 matrix with variable-length values:\n    //   (0,0) = [\"a\"]\n    //   (1,0) = [\"b\"]\n    hypothesis_indices = [[0, 0, 0],\n                          [1, 0, 0]]\n    hypothesis_values = [\"a\", \"b\"]\n    hypothesis_shape = [2, 1, 1]\n\n    // truth represents a 2x2 matrix with variable-length values:\n    //   (0,0) = []\n    //   (0,1) = [\"a\"]\n    //   (1,0) = [\"b\", \"c\"]\n    //   (1,1) = [\"a\"]\n    truth_indices = [[0, 1, 0],\n                     [1, 0, 0],\n                     [1, 0, 1],\n                     [1, 1, 0]]\n    truth_values = [\"a\", \"b\", \"c\", \"a\"]\n    truth_shape = [2, 2, 2]\n    normalize = true\n\nThe output will be:\n\n    // output is a 2x2 matrix with edit distances normalized by truth lengths.\n    output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis\n              [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis"
<a name="line-10254"></a>  name: "output"
<a name="line-10255"></a>  type: DT_FLOAT
<a name="line-10256"></a>}
<a name="line-10257"></a>-}</span>
<a name="line-10258"></a>
<a name="line-10259"></a><a name="reverse"></a><span class='hs-comment'>-- | Reverses specific dimensions of a tensor.</span>
<a name="line-10260"></a><span class='hs-comment'>--</span>
<a name="line-10261"></a><span class='hs-comment'>-- Given a `tensor`, and a `bool` tensor `dims` representing the dimensions</span>
<a name="line-10262"></a><span class='hs-comment'>-- of `tensor`, this operation reverses each dimension i of `tensor` where</span>
<a name="line-10263"></a><span class='hs-comment'>-- `dims[i]` is `True`.</span>
<a name="line-10264"></a><span class='hs-comment'>-- </span>
<a name="line-10265"></a><span class='hs-comment'>-- `tensor` can have up to 8 dimensions. The number of dimensions</span>
<a name="line-10266"></a><span class='hs-comment'>-- of `tensor` must equal the number of elements in `dims`. In other words:</span>
<a name="line-10267"></a><span class='hs-comment'>-- </span>
<a name="line-10268"></a><span class='hs-comment'>-- `rank(tensor) = size(dims)`</span>
<a name="line-10269"></a><span class='hs-comment'>-- </span>
<a name="line-10270"></a><span class='hs-comment'>-- For example:</span>
<a name="line-10271"></a><span class='hs-comment'>-- </span>
<a name="line-10272"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-10273"></a><span class='hs-comment'>-- # tensor 't' is [[[[ 0,  1,  2,  3],</span>
<a name="line-10274"></a><span class='hs-comment'>-- #                  [ 4,  5,  6,  7],</span>
<a name="line-10275"></a><span class='hs-comment'>-- #                  [ 8,  9, 10, 11]],</span>
<a name="line-10276"></a><span class='hs-comment'>-- #                 [[12, 13, 14, 15],</span>
<a name="line-10277"></a><span class='hs-comment'>-- #                  [16, 17, 18, 19],</span>
<a name="line-10278"></a><span class='hs-comment'>-- #                  [20, 21, 22, 23]]]]</span>
<a name="line-10279"></a><span class='hs-comment'>-- # tensor 't' shape is [1, 2, 3, 4]</span>
<a name="line-10280"></a><span class='hs-comment'>-- </span>
<a name="line-10281"></a><span class='hs-comment'>-- # 'dims' is [False, False, False, True]</span>
<a name="line-10282"></a><span class='hs-comment'>-- reverse(t, dims) ==&gt; [[[[ 3,  2,  1,  0],</span>
<a name="line-10283"></a><span class='hs-comment'>--                         [ 7,  6,  5,  4],</span>
<a name="line-10284"></a><span class='hs-comment'>--                         [ 11, 10, 9, 8]],</span>
<a name="line-10285"></a><span class='hs-comment'>--                        [[15, 14, 13, 12],</span>
<a name="line-10286"></a><span class='hs-comment'>--                         [19, 18, 17, 16],</span>
<a name="line-10287"></a><span class='hs-comment'>--                         [23, 22, 21, 20]]]]</span>
<a name="line-10288"></a><span class='hs-comment'>-- </span>
<a name="line-10289"></a><span class='hs-comment'>-- # 'dims' is [False, True, False, False]</span>
<a name="line-10290"></a><span class='hs-comment'>-- reverse(t, dims) ==&gt; [[[[12, 13, 14, 15],</span>
<a name="line-10291"></a><span class='hs-comment'>--                         [16, 17, 18, 19],</span>
<a name="line-10292"></a><span class='hs-comment'>--                         [20, 21, 22, 23]</span>
<a name="line-10293"></a><span class='hs-comment'>--                        [[ 0,  1,  2,  3],</span>
<a name="line-10294"></a><span class='hs-comment'>--                         [ 4,  5,  6,  7],</span>
<a name="line-10295"></a><span class='hs-comment'>--                         [ 8,  9, 10, 11]]]]</span>
<a name="line-10296"></a><span class='hs-comment'>-- </span>
<a name="line-10297"></a><span class='hs-comment'>-- # 'dims' is [False, False, True, False]</span>
<a name="line-10298"></a><span class='hs-comment'>-- reverse(t, dims) ==&gt; [[[[8, 9, 10, 11],</span>
<a name="line-10299"></a><span class='hs-comment'>--                         [4, 5, 6, 7],</span>
<a name="line-10300"></a><span class='hs-comment'>--                         [0, 1, 2, 3]]</span>
<a name="line-10301"></a><span class='hs-comment'>--                        [[20, 21, 22, 23],</span>
<a name="line-10302"></a><span class='hs-comment'>--                         [16, 17, 18, 19],</span>
<a name="line-10303"></a><span class='hs-comment'>--                         [12, 13, 14, 15]]]]</span>
<a name="line-10304"></a><span class='hs-comment'>-- ```</span>
<a name="line-10305"></a><span class='hs-definition'>reverse</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-10306"></a>                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-10307"></a>                                     <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span> <span class='hs-conid'>Bool</span><span class='hs-layout'>,</span>
<a name="line-10308"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-10309"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-10310"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10311"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __tensor__: Up to 8-D.</span>
<a name="line-10312"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __dims__: 1-D. The dimensions to reverse.</span>
<a name="line-10313"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The same shape as `tensor`.</span>
<a name="line-10314"></a><span class='hs-definition'>reverse</span> <span class='hs-varid'>tensor</span> <span class='hs-varid'>dims</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10315"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Reverse"</span>
<a name="line-10316"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10317"></a>        <span class='hs-varid'>tensor</span> <span class='hs-varid'>dims</span>
<a name="line-10318"></a><span class='hs-comment'>{-
<a name="line-10319"></a>attr {
<a name="line-10320"></a>  allowed_values {
<a name="line-10321"></a>    list {
<a name="line-10322"></a>      type: DT_UINT8
<a name="line-10323"></a>      type: DT_INT8
<a name="line-10324"></a>      type: DT_INT32
<a name="line-10325"></a>      type: DT_INT64
<a name="line-10326"></a>      type: DT_BOOL
<a name="line-10327"></a>      type: DT_HALF
<a name="line-10328"></a>      type: DT_FLOAT
<a name="line-10329"></a>      type: DT_DOUBLE
<a name="line-10330"></a>      type: DT_COMPLEX64
<a name="line-10331"></a>      type: DT_COMPLEX128
<a name="line-10332"></a>    }
<a name="line-10333"></a>  }
<a name="line-10334"></a>  name: "T"
<a name="line-10335"></a>  type: "type"
<a name="line-10336"></a>}
<a name="line-10337"></a>input_arg {
<a name="line-10338"></a>  description: "Up to 8-D." name: "tensor" type_attr: "T"
<a name="line-10339"></a>}
<a name="line-10340"></a>input_arg {
<a name="line-10341"></a>  description: "1-D. The dimensions to reverse."
<a name="line-10342"></a>  name: "dims"
<a name="line-10343"></a>  type: DT_BOOL
<a name="line-10344"></a>}
<a name="line-10345"></a>output_arg {
<a name="line-10346"></a>  description: "The same shape as `tensor`."
<a name="line-10347"></a>  name: "output"
<a name="line-10348"></a>  type_attr: "T"
<a name="line-10349"></a>}
<a name="line-10350"></a>-}</span>
<a name="line-10351"></a>
<a name="line-10352"></a><a name="matrixSetDiag"></a><span class='hs-comment'>-- | Returns a batched matrix tensor with new batched diagonal values.</span>
<a name="line-10353"></a><span class='hs-comment'>--</span>
<a name="line-10354"></a><span class='hs-comment'>-- Given `input` and `diagonal`, this operation returns a tensor with the</span>
<a name="line-10355"></a><span class='hs-comment'>-- same shape and values as `input`, except for the main diagonal of the</span>
<a name="line-10356"></a><span class='hs-comment'>-- innermost matrices.  These will be overwritten by the values in `diagonal`.</span>
<a name="line-10357"></a><span class='hs-comment'>-- </span>
<a name="line-10358"></a><span class='hs-comment'>-- The output is computed as follows:</span>
<a name="line-10359"></a><span class='hs-comment'>-- </span>
<a name="line-10360"></a><span class='hs-comment'>-- Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has</span>
<a name="line-10361"></a><span class='hs-comment'>-- `k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a</span>
<a name="line-10362"></a><span class='hs-comment'>-- tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:</span>
<a name="line-10363"></a><span class='hs-comment'>-- </span>
<a name="line-10364"></a><span class='hs-comment'>--   * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.</span>
<a name="line-10365"></a><span class='hs-comment'>--   * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.</span>
<a name="line-10366"></a><span class='hs-definition'>matrixSetDiag</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10367"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Rank `k+1`, where `k &gt;= 1`.</span>
<a name="line-10368"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __diagonal__: Rank `k`, where `k &gt;= 1`.</span>
<a name="line-10369"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Rank `k+1`, with `output.shape = input.shape`.</span>
<a name="line-10370"></a><span class='hs-definition'>matrixSetDiag</span> <span class='hs-varid'>input</span> <span class='hs-varid'>diagonal</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10371"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatrixSetDiag"</span>
<a name="line-10372"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10373"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>diagonal</span>
<a name="line-10374"></a><span class='hs-comment'>{-
<a name="line-10375"></a>attr { name: "T" type: "type" }
<a name="line-10376"></a>input_arg {
<a name="line-10377"></a>  description: "Rank `k+1`, where `k &gt;= 1`."
<a name="line-10378"></a>  name: "input"
<a name="line-10379"></a>  type_attr: "T"
<a name="line-10380"></a>}
<a name="line-10381"></a>input_arg {
<a name="line-10382"></a>  description: "Rank `k`, where `k &gt;= 1`."
<a name="line-10383"></a>  name: "diagonal"
<a name="line-10384"></a>  type_attr: "T"
<a name="line-10385"></a>}
<a name="line-10386"></a>output_arg {
<a name="line-10387"></a>  description: "Rank `k+1`, with `output.shape = input.shape`."
<a name="line-10388"></a>  name: "output"
<a name="line-10389"></a>  type_attr: "T"
<a name="line-10390"></a>}
<a name="line-10391"></a>-}</span>
<a name="line-10392"></a>
<a name="line-10393"></a><a name="matrixDiag"></a><span class='hs-comment'>-- | Returns a batched diagonal tensor with a given batched diagonal values.</span>
<a name="line-10394"></a><span class='hs-comment'>--</span>
<a name="line-10395"></a><span class='hs-comment'>-- Given a `diagonal`, this operation returns a tensor with the `diagonal` and</span>
<a name="line-10396"></a><span class='hs-comment'>-- everything else padded with zeros. The diagonal is computed as follows:</span>
<a name="line-10397"></a><span class='hs-comment'>-- </span>
<a name="line-10398"></a><span class='hs-comment'>-- Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a</span>
<a name="line-10399"></a><span class='hs-comment'>-- tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where:</span>
<a name="line-10400"></a><span class='hs-comment'>-- </span>
<a name="line-10401"></a><span class='hs-comment'>-- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.</span>
<a name="line-10402"></a><span class='hs-comment'>-- </span>
<a name="line-10403"></a><span class='hs-comment'>-- For example:</span>
<a name="line-10404"></a><span class='hs-comment'>-- </span>
<a name="line-10405"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-10406"></a><span class='hs-comment'>-- # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]</span>
<a name="line-10407"></a><span class='hs-comment'>-- </span>
<a name="line-10408"></a><span class='hs-comment'>-- and diagonal.shape = (2, 4)</span>
<a name="line-10409"></a><span class='hs-comment'>-- </span>
<a name="line-10410"></a><span class='hs-comment'>-- tf.matrix_diag(diagonal) ==&gt; [[[1, 0, 0, 0]</span>
<a name="line-10411"></a><span class='hs-comment'>--                                      [0, 2, 0, 0]</span>
<a name="line-10412"></a><span class='hs-comment'>--                                      [0, 0, 3, 0]</span>
<a name="line-10413"></a><span class='hs-comment'>--                                      [0, 0, 0, 4]],</span>
<a name="line-10414"></a><span class='hs-comment'>--                                     [[5, 0, 0, 0]</span>
<a name="line-10415"></a><span class='hs-comment'>--                                      [0, 6, 0, 0]</span>
<a name="line-10416"></a><span class='hs-comment'>--                                      [0, 0, 7, 0]</span>
<a name="line-10417"></a><span class='hs-comment'>--                                      [0, 0, 0, 8]]]</span>
<a name="line-10418"></a><span class='hs-comment'>-- </span>
<a name="line-10419"></a><span class='hs-comment'>-- which has shape (2, 4, 4)</span>
<a name="line-10420"></a><span class='hs-comment'>-- ```</span>
<a name="line-10421"></a><span class='hs-definition'>matrixDiag</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10422"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __diagonal__: Rank `k`, where `k &gt;= 1`.</span>
<a name="line-10423"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.</span>
<a name="line-10424"></a><span class='hs-definition'>matrixDiag</span> <span class='hs-varid'>diagonal</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10425"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatrixDiag"</span>
<a name="line-10426"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10427"></a>        <span class='hs-varid'>diagonal</span>
<a name="line-10428"></a><span class='hs-comment'>{-
<a name="line-10429"></a>attr { name: "T" type: "type" }
<a name="line-10430"></a>input_arg {
<a name="line-10431"></a>  description: "Rank `k`, where `k &gt;= 1`."
<a name="line-10432"></a>  name: "diagonal"
<a name="line-10433"></a>  type_attr: "T"
<a name="line-10434"></a>}
<a name="line-10435"></a>output_arg {
<a name="line-10436"></a>  description: "Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`."
<a name="line-10437"></a>  name: "output"
<a name="line-10438"></a>  type_attr: "T"
<a name="line-10439"></a>}
<a name="line-10440"></a>-}</span>
<a name="line-10441"></a>
<a name="line-10442"></a><a name="diag"></a><span class='hs-comment'>-- | Returns a diagonal tensor with a given diagonal values.</span>
<a name="line-10443"></a><span class='hs-comment'>--</span>
<a name="line-10444"></a><span class='hs-comment'>-- Given a `diagonal`, this operation returns a tensor with the `diagonal` and</span>
<a name="line-10445"></a><span class='hs-comment'>-- everything else padded with zeros. The diagonal is computed as follows:</span>
<a name="line-10446"></a><span class='hs-comment'>-- </span>
<a name="line-10447"></a><span class='hs-comment'>-- Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of</span>
<a name="line-10448"></a><span class='hs-comment'>-- rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:</span>
<a name="line-10449"></a><span class='hs-comment'>-- </span>
<a name="line-10450"></a><span class='hs-comment'>-- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.</span>
<a name="line-10451"></a><span class='hs-comment'>-- </span>
<a name="line-10452"></a><span class='hs-comment'>-- For example:</span>
<a name="line-10453"></a><span class='hs-comment'>-- </span>
<a name="line-10454"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-10455"></a><span class='hs-comment'>-- # 'diagonal' is [1, 2, 3, 4]</span>
<a name="line-10456"></a><span class='hs-comment'>-- tf.diag(diagonal) ==&gt; [[1, 0, 0, 0]</span>
<a name="line-10457"></a><span class='hs-comment'>--                        [0, 2, 0, 0]</span>
<a name="line-10458"></a><span class='hs-comment'>--                        [0, 0, 3, 0]</span>
<a name="line-10459"></a><span class='hs-comment'>--                        [0, 0, 0, 4]]</span>
<a name="line-10460"></a><span class='hs-comment'>-- ```</span>
<a name="line-10461"></a><span class='hs-definition'>diag</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-10462"></a>                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-10463"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-10464"></a>                                             <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10465"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __diagonal__: Rank k tensor where k is at most 3.</span>
<a name="line-10466"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-10467"></a><span class='hs-definition'>diag</span> <span class='hs-varid'>diagonal</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10468"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Diag"</span>
<a name="line-10469"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10470"></a>        <span class='hs-varid'>diagonal</span>
<a name="line-10471"></a><span class='hs-comment'>{-
<a name="line-10472"></a>attr {
<a name="line-10473"></a>  allowed_values {
<a name="line-10474"></a>    list {
<a name="line-10475"></a>      type: DT_FLOAT
<a name="line-10476"></a>      type: DT_DOUBLE
<a name="line-10477"></a>      type: DT_INT32
<a name="line-10478"></a>      type: DT_INT64
<a name="line-10479"></a>      type: DT_COMPLEX64
<a name="line-10480"></a>      type: DT_COMPLEX128
<a name="line-10481"></a>    }
<a name="line-10482"></a>  }
<a name="line-10483"></a>  name: "T"
<a name="line-10484"></a>  type: "type"
<a name="line-10485"></a>}
<a name="line-10486"></a>input_arg {
<a name="line-10487"></a>  description: "Rank k tensor where k is at most 3."
<a name="line-10488"></a>  name: "diagonal"
<a name="line-10489"></a>  type_attr: "T"
<a name="line-10490"></a>}
<a name="line-10491"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-10492"></a>-}</span>
<a name="line-10493"></a>
<a name="line-10494"></a><a name="immutableConst"></a><span class='hs-comment'>-- | Returns immutable tensor from memory region.</span>
<a name="line-10495"></a><span class='hs-comment'>--</span>
<a name="line-10496"></a><span class='hs-comment'>-- The current implementation memmaps the tensor from a file.</span>
<a name="line-10497"></a><span class='hs-definition'>immutableConst</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10498"></a>                  <span class='hs-conid'>Shape</span> <span class='hs-comment'>-- ^ __shape__: Shape of the returned tensor.</span>
<a name="line-10499"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __tensor__</span>
<a name="line-10500"></a><span class='hs-definition'>immutableConst</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10501"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ImmutableConst"</span>
<a name="line-10502"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-10503"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"shape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>shape</span><span class='hs-layout'>)</span>
<a name="line-10504"></a>        
<a name="line-10505"></a><span class='hs-comment'>{-
<a name="line-10506"></a>attr {
<a name="line-10507"></a>  description: "Type of the returned tensor."
<a name="line-10508"></a>  name: "dtype"
<a name="line-10509"></a>  type: "type"
<a name="line-10510"></a>}
<a name="line-10511"></a>attr {
<a name="line-10512"></a>  description: "Shape of the returned tensor."
<a name="line-10513"></a>  name: "shape"
<a name="line-10514"></a>  type: "shape"
<a name="line-10515"></a>}
<a name="line-10516"></a>attr {
<a name="line-10517"></a>  description: "Name of readonly memory region used by the tensor, see\nNewReadOnlyMemoryRegionFromFile in tensorflow::Env."
<a name="line-10518"></a>  name: "memory_region_name"
<a name="line-10519"></a>  type: "string"
<a name="line-10520"></a>}
<a name="line-10521"></a>output_arg { name: "tensor" type_attr: "dtype" }
<a name="line-10522"></a>-}</span>
<a name="line-10523"></a>
<a name="line-10524"></a><span class='hs-comment'>-- | Concatenates tensors along one dimension.</span>
<a name="line-10525"></a>
<a name="line-10526"></a><a name="concat"></a><span class='hs-definition'>concat</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10527"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __concat_dim__: 0-D.  The dimension along which to concatenate.  Must be in the</span>
<a name="line-10528"></a>                                   <span class='hs-comment'>-- range [0, rank(values)).</span>
<a name="line-10529"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match,</span>
<a name="line-10530"></a>                           <span class='hs-comment'>-- and their sizes must match in all dimensions except `concat_dim`.</span>
<a name="line-10531"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: A `Tensor` with the concatenation of values stacked along the</span>
<a name="line-10532"></a>          <span class='hs-comment'>-- `concat_dim` dimension.  This tensor's shape matches that of `values` except</span>
<a name="line-10533"></a>          <span class='hs-comment'>-- in `concat_dim` where it has the sum of the sizes.</span>
<a name="line-10534"></a><span class='hs-definition'>concat</span> <span class='hs-varid'>concat_dim</span> <span class='hs-varid'>values</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"values"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>values</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10535"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Concat"</span>
<a name="line-10536"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-10537"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-10538"></a>        <span class='hs-varid'>concat_dim</span> <span class='hs-varid'>values</span>
<a name="line-10539"></a>  <span class='hs-keyword'>where</span>
<a name="line-10540"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>values</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-10541"></a><span class='hs-comment'>{-
<a name="line-10542"></a>attr { has_minimum: true minimum: 2 name: "N" type: "int" }
<a name="line-10543"></a>attr { name: "T" type: "type" }
<a name="line-10544"></a>input_arg {
<a name="line-10545"></a>  description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [0, rank(values))."
<a name="line-10546"></a>  name: "concat_dim"
<a name="line-10547"></a>  type: DT_INT32
<a name="line-10548"></a>}
<a name="line-10549"></a>input_arg {
<a name="line-10550"></a>  description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
<a name="line-10551"></a>  name: "values"
<a name="line-10552"></a>  number_attr: "N"
<a name="line-10553"></a>  type_attr: "T"
<a name="line-10554"></a>}
<a name="line-10555"></a>output_arg {
<a name="line-10556"></a>  description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
<a name="line-10557"></a>  name: "output"
<a name="line-10558"></a>  type_attr: "T"
<a name="line-10559"></a>}
<a name="line-10560"></a>-}</span>
<a name="line-10561"></a>
<a name="line-10562"></a><a name="unpack"></a><span class='hs-comment'>-- | Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.</span>
<a name="line-10563"></a><span class='hs-comment'>--</span>
<a name="line-10564"></a><span class='hs-comment'>-- Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.</span>
<a name="line-10565"></a><span class='hs-comment'>-- For example, given a tensor of shape `(A, B, C, D)`;</span>
<a name="line-10566"></a><span class='hs-comment'>-- </span>
<a name="line-10567"></a><span class='hs-comment'>-- If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`</span>
<a name="line-10568"></a><span class='hs-comment'>--   and each tensor in `output` will have shape `(B, C, D)`. (Note that the</span>
<a name="line-10569"></a><span class='hs-comment'>--   dimension unpacked along is gone, unlike `split`).</span>
<a name="line-10570"></a><span class='hs-comment'>-- </span>
<a name="line-10571"></a><span class='hs-comment'>-- If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`</span>
<a name="line-10572"></a><span class='hs-comment'>--   and each tensor in `output` will have shape `(A, C, D)`.</span>
<a name="line-10573"></a><span class='hs-comment'>-- Etc.</span>
<a name="line-10574"></a><span class='hs-comment'>-- </span>
<a name="line-10575"></a><span class='hs-comment'>-- This is the opposite of `pack`.</span>
<a name="line-10576"></a><span class='hs-definition'>unpack</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num__</span>
<a name="line-10577"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: 1-D or higher, with `axis` dimension size equal to `num`.</span>
<a name="line-10578"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __output__: The list of tensors unpacked from `value`.</span>
<a name="line-10579"></a><span class='hs-definition'>unpack</span> <span class='hs-varid'>num</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10580"></a>    <span class='hs-varid'>buildListOp</span> <span class='hs-keyglyph'>[</span><span class='hs-varid'>num</span><span class='hs-keyglyph'>]</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Unpack"</span>
<a name="line-10581"></a>                       <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-10582"></a>                       <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num</span><span class='hs-layout'>)</span>
<a name="line-10583"></a>        <span class='hs-varid'>value</span>
<a name="line-10584"></a><span class='hs-comment'>{-
<a name="line-10585"></a>attr { has_minimum: true name: "num" type: "int" }
<a name="line-10586"></a>attr { name: "T" type: "type" }
<a name="line-10587"></a>attr {
<a name="line-10588"></a>  default_value { i: 0 }
<a name="line-10589"></a>  description: "Dimension along which to unpack.  Negative values wrap around, so the\nvalid range is `[-R, R)`."
<a name="line-10590"></a>  name: "axis"
<a name="line-10591"></a>  type: "int"
<a name="line-10592"></a>}
<a name="line-10593"></a>input_arg {
<a name="line-10594"></a>  description: "1-D or higher, with `axis` dimension size equal to `num`."
<a name="line-10595"></a>  name: "value"
<a name="line-10596"></a>  type_attr: "T"
<a name="line-10597"></a>}
<a name="line-10598"></a>output_arg {
<a name="line-10599"></a>  description: "The list of tensors unpacked from `value`."
<a name="line-10600"></a>  name: "output"
<a name="line-10601"></a>  number_attr: "num"
<a name="line-10602"></a>  type_attr: "T"
<a name="line-10603"></a>}
<a name="line-10604"></a>-}</span>
<a name="line-10605"></a>
<a name="line-10606"></a><span class='hs-comment'>-- | Output a fact about factorials.</span>
<a name="line-10607"></a>
<a name="line-10608"></a><a name="fact"></a><span class='hs-definition'>fact</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __fact__</span>
<a name="line-10609"></a><span class='hs-definition'>fact</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10610"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Fact"</span><span class='hs-layout'>)</span>
<a name="line-10611"></a>        
<a name="line-10612"></a><span class='hs-comment'>{-
<a name="line-10613"></a>output_arg { name: "fact" type: DT_STRING }
<a name="line-10614"></a>-}</span>
<a name="line-10615"></a>
<a name="line-10616"></a><a name="abs"></a><span class='hs-comment'>-- | Computes the absolute value of a tensor.</span>
<a name="line-10617"></a><span class='hs-comment'>--</span>
<a name="line-10618"></a><span class='hs-comment'>-- Given a tensor `x`, this operation returns a tensor containing the absolute</span>
<a name="line-10619"></a><span class='hs-comment'>-- value of each element in `x`. For example, if x is an input element and y is</span>
<a name="line-10620"></a><span class='hs-comment'>-- an output element, this operation computes \\(y = |x|\\).</span>
<a name="line-10621"></a><span class='hs-definition'>abs</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-10622"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-10623"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-10624"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-10625"></a><span class='hs-definition'>abs</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10626"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Abs"</span>
<a name="line-10627"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10628"></a>        <span class='hs-varid'>x</span>
<a name="line-10629"></a><span class='hs-comment'>{-
<a name="line-10630"></a>attr {
<a name="line-10631"></a>  allowed_values {
<a name="line-10632"></a>    list {
<a name="line-10633"></a>      type: DT_HALF
<a name="line-10634"></a>      type: DT_FLOAT
<a name="line-10635"></a>      type: DT_DOUBLE
<a name="line-10636"></a>      type: DT_INT32
<a name="line-10637"></a>      type: DT_INT64
<a name="line-10638"></a>    }
<a name="line-10639"></a>  }
<a name="line-10640"></a>  name: "T"
<a name="line-10641"></a>  type: "type"
<a name="line-10642"></a>}
<a name="line-10643"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-10644"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-10645"></a>-}</span>
<a name="line-10646"></a>
<a name="line-10647"></a><a name="softmax"></a><span class='hs-comment'>-- | Computes softmax activations.</span>
<a name="line-10648"></a><span class='hs-comment'>--</span>
<a name="line-10649"></a><span class='hs-comment'>-- For each batch `i` and class `j` we have</span>
<a name="line-10650"></a><span class='hs-comment'>-- </span>
<a name="line-10651"></a><span class='hs-comment'>--     softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))</span>
<a name="line-10652"></a><span class='hs-definition'>softmax</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-10653"></a>                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10654"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __logits__: 2-D with shape `[batch_size, num_classes]`.</span>
<a name="line-10655"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __softmax__: Same shape as `logits`.</span>
<a name="line-10656"></a><span class='hs-definition'>softmax</span> <span class='hs-varid'>logits</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10657"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Softmax"</span>
<a name="line-10658"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10659"></a>        <span class='hs-varid'>logits</span>
<a name="line-10660"></a><span class='hs-comment'>{-
<a name="line-10661"></a>attr {
<a name="line-10662"></a>  allowed_values {
<a name="line-10663"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-10664"></a>  }
<a name="line-10665"></a>  name: "T"
<a name="line-10666"></a>  type: "type"
<a name="line-10667"></a>}
<a name="line-10668"></a>input_arg {
<a name="line-10669"></a>  description: "2-D with shape `[batch_size, num_classes]`."
<a name="line-10670"></a>  name: "logits"
<a name="line-10671"></a>  type_attr: "T"
<a name="line-10672"></a>}
<a name="line-10673"></a>output_arg {
<a name="line-10674"></a>  description: "Same shape as `logits`."
<a name="line-10675"></a>  name: "softmax"
<a name="line-10676"></a>  type_attr: "T"
<a name="line-10677"></a>}
<a name="line-10678"></a>-}</span>
<a name="line-10679"></a>
<a name="line-10680"></a><a name="reverseV2"></a><span class='hs-comment'>-- | Reverses specific dimensions of a tensor.</span>
<a name="line-10681"></a><span class='hs-comment'>--</span>
<a name="line-10682"></a><span class='hs-comment'>-- Given a `tensor`, and a `int32` tensor `axis` representing the set of</span>
<a name="line-10683"></a><span class='hs-comment'>-- dimensions of `tensor` to reverse. This operation reverses each dimension</span>
<a name="line-10684"></a><span class='hs-comment'>-- `i` for which there exists `j` s.t. `axis[j] == i`.</span>
<a name="line-10685"></a><span class='hs-comment'>-- </span>
<a name="line-10686"></a><span class='hs-comment'>-- `tensor` can have up to 8 dimensions. The number of dimensions specified</span>
<a name="line-10687"></a><span class='hs-comment'>-- in `axis` may be 0 or more entries. If an index is specified more than</span>
<a name="line-10688"></a><span class='hs-comment'>-- once, a InvalidArgument error is raised.</span>
<a name="line-10689"></a><span class='hs-comment'>-- </span>
<a name="line-10690"></a><span class='hs-comment'>-- For example:</span>
<a name="line-10691"></a><span class='hs-comment'>-- </span>
<a name="line-10692"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-10693"></a><span class='hs-comment'>-- # tensor 't' is [[[[ 0,  1,  2,  3],</span>
<a name="line-10694"></a><span class='hs-comment'>-- #                  [ 4,  5,  6,  7],</span>
<a name="line-10695"></a><span class='hs-comment'>-- #                  [ 8,  9, 10, 11]],</span>
<a name="line-10696"></a><span class='hs-comment'>-- #                 [[12, 13, 14, 15],</span>
<a name="line-10697"></a><span class='hs-comment'>-- #                  [16, 17, 18, 19],</span>
<a name="line-10698"></a><span class='hs-comment'>-- #                  [20, 21, 22, 23]]]]</span>
<a name="line-10699"></a><span class='hs-comment'>-- # tensor 't' shape is [1, 2, 3, 4]</span>
<a name="line-10700"></a><span class='hs-comment'>-- </span>
<a name="line-10701"></a><span class='hs-comment'>-- # 'dims' is [3] or 'dims' is -1</span>
<a name="line-10702"></a><span class='hs-comment'>-- reverse(t, dims) ==&gt; [[[[ 3,  2,  1,  0],</span>
<a name="line-10703"></a><span class='hs-comment'>--                         [ 7,  6,  5,  4],</span>
<a name="line-10704"></a><span class='hs-comment'>--                         [ 11, 10, 9, 8]],</span>
<a name="line-10705"></a><span class='hs-comment'>--                        [[15, 14, 13, 12],</span>
<a name="line-10706"></a><span class='hs-comment'>--                         [19, 18, 17, 16],</span>
<a name="line-10707"></a><span class='hs-comment'>--                         [23, 22, 21, 20]]]]</span>
<a name="line-10708"></a><span class='hs-comment'>-- </span>
<a name="line-10709"></a><span class='hs-comment'>-- # 'dims' is '[1]' (or 'dims' is '[-3]')</span>
<a name="line-10710"></a><span class='hs-comment'>-- reverse(t, dims) ==&gt; [[[[12, 13, 14, 15],</span>
<a name="line-10711"></a><span class='hs-comment'>--                         [16, 17, 18, 19],</span>
<a name="line-10712"></a><span class='hs-comment'>--                         [20, 21, 22, 23]</span>
<a name="line-10713"></a><span class='hs-comment'>--                        [[ 0,  1,  2,  3],</span>
<a name="line-10714"></a><span class='hs-comment'>--                         [ 4,  5,  6,  7],</span>
<a name="line-10715"></a><span class='hs-comment'>--                         [ 8,  9, 10, 11]]]]</span>
<a name="line-10716"></a><span class='hs-comment'>-- </span>
<a name="line-10717"></a><span class='hs-comment'>-- # 'dims' is '[2]' (or 'dims' is '[-2]')</span>
<a name="line-10718"></a><span class='hs-comment'>-- reverse(t, dims) ==&gt; [[[[8, 9, 10, 11],</span>
<a name="line-10719"></a><span class='hs-comment'>--                         [4, 5, 6, 7],</span>
<a name="line-10720"></a><span class='hs-comment'>--                         [0, 1, 2, 3]]</span>
<a name="line-10721"></a><span class='hs-comment'>--                        [[20, 21, 22, 23],</span>
<a name="line-10722"></a><span class='hs-comment'>--                         [16, 17, 18, 19],</span>
<a name="line-10723"></a><span class='hs-comment'>--                         [12, 13, 14, 15]]]]</span>
<a name="line-10724"></a><span class='hs-comment'>-- ```</span>
<a name="line-10725"></a><span class='hs-definition'>reverseV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-10726"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-10727"></a>                                    <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-10728"></a>                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-10729"></a>                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span> <span class='hs-conid'>Bool</span><span class='hs-layout'>,</span>
<a name="line-10730"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-10731"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-10732"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-10733"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10734"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __tensor__: Up to 8-D.</span>
<a name="line-10735"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __axis__: 1-D. The indices of the dimensions to reverse.</span>
<a name="line-10736"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The same shape as `tensor`.</span>
<a name="line-10737"></a><span class='hs-definition'>reverseV2</span> <span class='hs-varid'>tensor</span> <span class='hs-varid'>axis</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10738"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReverseV2"</span>
<a name="line-10739"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span>
<a name="line-10740"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10741"></a>        <span class='hs-varid'>tensor</span> <span class='hs-varid'>axis</span>
<a name="line-10742"></a><span class='hs-comment'>{-
<a name="line-10743"></a>attr {
<a name="line-10744"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-10745"></a>  default_value { type: DT_INT32 }
<a name="line-10746"></a>  name: "Tidx"
<a name="line-10747"></a>  type: "type"
<a name="line-10748"></a>}
<a name="line-10749"></a>attr {
<a name="line-10750"></a>  allowed_values {
<a name="line-10751"></a>    list {
<a name="line-10752"></a>      type: DT_UINT8
<a name="line-10753"></a>      type: DT_INT8
<a name="line-10754"></a>      type: DT_INT32
<a name="line-10755"></a>      type: DT_INT64
<a name="line-10756"></a>      type: DT_BOOL
<a name="line-10757"></a>      type: DT_HALF
<a name="line-10758"></a>      type: DT_FLOAT
<a name="line-10759"></a>      type: DT_DOUBLE
<a name="line-10760"></a>      type: DT_COMPLEX64
<a name="line-10761"></a>      type: DT_COMPLEX128
<a name="line-10762"></a>    }
<a name="line-10763"></a>  }
<a name="line-10764"></a>  name: "T"
<a name="line-10765"></a>  type: "type"
<a name="line-10766"></a>}
<a name="line-10767"></a>input_arg {
<a name="line-10768"></a>  description: "Up to 8-D." name: "tensor" type_attr: "T"
<a name="line-10769"></a>}
<a name="line-10770"></a>input_arg {
<a name="line-10771"></a>  description: "1-D. The indices of the dimensions to reverse."
<a name="line-10772"></a>  name: "axis"
<a name="line-10773"></a>  type_attr: "Tidx"
<a name="line-10774"></a>}
<a name="line-10775"></a>output_arg {
<a name="line-10776"></a>  description: "The same shape as `tensor`."
<a name="line-10777"></a>  name: "output"
<a name="line-10778"></a>  type_attr: "T"
<a name="line-10779"></a>}
<a name="line-10780"></a>-}</span>
<a name="line-10781"></a>
<a name="line-10782"></a><span class='hs-comment'>-- | Return a tensor with the same shape and contents as the input tensor or value.</span>
<a name="line-10783"></a>
<a name="line-10784"></a><a name="identity"></a><span class='hs-definition'>identity</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-10785"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-10786"></a><span class='hs-definition'>identity</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10787"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Identity"</span>
<a name="line-10788"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10789"></a>        <span class='hs-varid'>input</span>
<a name="line-10790"></a><span class='hs-comment'>{-
<a name="line-10791"></a>attr { name: "T" type: "type" }
<a name="line-10792"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-10793"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-10794"></a>-}</span>
<a name="line-10795"></a>
<a name="line-10796"></a><a name="sparseAdd"></a><span class='hs-comment'>-- | Adds two `SparseTensor` objects to produce another `SparseTensor`.</span>
<a name="line-10797"></a><span class='hs-comment'>--</span>
<a name="line-10798"></a><span class='hs-comment'>-- The input `SparseTensor` objects' indices are assumed ordered in standard</span>
<a name="line-10799"></a><span class='hs-comment'>-- lexicographic order.  If this is not the case, before this step run</span>
<a name="line-10800"></a><span class='hs-comment'>-- `SparseReorder` to restore index ordering.</span>
<a name="line-10801"></a><span class='hs-comment'>-- </span>
<a name="line-10802"></a><span class='hs-comment'>-- By default, if two values sum to zero at some index, the output `SparseTensor`</span>
<a name="line-10803"></a><span class='hs-comment'>-- would still include that particular location in its index, storing a zero in the</span>
<a name="line-10804"></a><span class='hs-comment'>-- corresponding value slot.  To override this, callers can specify `thresh`,</span>
<a name="line-10805"></a><span class='hs-comment'>-- indicating that if the sum has a magnitude strictly smaller than `thresh`, its</span>
<a name="line-10806"></a><span class='hs-comment'>-- corresponding value and index would then not be included.  In particular,</span>
<a name="line-10807"></a><span class='hs-comment'>-- `thresh == 0` (default) means everything is kept and actual thresholding happens</span>
<a name="line-10808"></a><span class='hs-comment'>-- only for a positive value.</span>
<a name="line-10809"></a><span class='hs-comment'>-- </span>
<a name="line-10810"></a><span class='hs-comment'>-- In the following shapes, `nnz` is the count after taking `thresh` into account.</span>
<a name="line-10811"></a><span class='hs-definition'>sparseAdd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-varid'>treal</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-10812"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-10813"></a>                                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-10814"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-10815"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-10816"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-10817"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-10818"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-10819"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-10820"></a>                                                            <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-10821"></a>                                                    <span class='hs-conid'>TensorType</span> <span class='hs-varid'>treal</span><span class='hs-layout'>,</span>
<a name="line-10822"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-10823"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-10824"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-10825"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-10826"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-10827"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-10828"></a>                                                            <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-10829"></a>                                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>treal</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10830"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __a_indices__: 2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.</span>
<a name="line-10831"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a_values__: 1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.</span>
<a name="line-10832"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __a_shape__: 1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.</span>
<a name="line-10833"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __b_indices__: 2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.</span>
<a name="line-10834"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __b_values__: 1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.</span>
<a name="line-10835"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __b_shape__: 1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.</span>
<a name="line-10836"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>treal</span> <span class='hs-comment'>-- ^ __thresh__: 0-D.  The magnitude threshold that determines if an output value/index</span>
<a name="line-10837"></a>                                <span class='hs-comment'>-- pair takes space.</span>
<a name="line-10838"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-10839"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span>
<a name="line-10840"></a>             <span class='hs-comment'>-- ^ (__sum_indices__, __sum_values__, __sum_shape__)</span>
<a name="line-10841"></a>             <span class='hs-comment'>--</span>
<a name="line-10842"></a>             <span class='hs-comment'>-- * __sum_indices__</span>
<a name="line-10843"></a>             <span class='hs-comment'>--</span>
<a name="line-10844"></a>             <span class='hs-comment'>-- * __sum_values__</span>
<a name="line-10845"></a>             <span class='hs-comment'>--</span>
<a name="line-10846"></a>             <span class='hs-comment'>-- * __sum_shape__</span>
<a name="line-10847"></a><span class='hs-definition'>sparseAdd</span> <span class='hs-varid'>a_indices</span> <span class='hs-varid'>a_values</span> <span class='hs-varid'>a_shape</span> <span class='hs-varid'>b_indices</span> <span class='hs-varid'>b_values</span> <span class='hs-varid'>b_shape</span>
<a name="line-10848"></a>          <span class='hs-varid'>thresh</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10849"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseAdd"</span>
<a name="line-10850"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-10851"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Treal"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>treal</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10852"></a>        <span class='hs-varid'>a_indices</span> <span class='hs-varid'>a_values</span> <span class='hs-varid'>a_shape</span> <span class='hs-varid'>b_indices</span> <span class='hs-varid'>b_values</span> <span class='hs-varid'>b_shape</span> <span class='hs-varid'>thresh</span>
<a name="line-10853"></a><span class='hs-comment'>{-
<a name="line-10854"></a>attr {
<a name="line-10855"></a>  allowed_values {
<a name="line-10856"></a>    list {
<a name="line-10857"></a>      type: DT_FLOAT
<a name="line-10858"></a>      type: DT_DOUBLE
<a name="line-10859"></a>      type: DT_INT64
<a name="line-10860"></a>      type: DT_INT32
<a name="line-10861"></a>      type: DT_UINT8
<a name="line-10862"></a>      type: DT_UINT16
<a name="line-10863"></a>      type: DT_INT16
<a name="line-10864"></a>      type: DT_INT8
<a name="line-10865"></a>      type: DT_COMPLEX64
<a name="line-10866"></a>      type: DT_COMPLEX128
<a name="line-10867"></a>      type: DT_QINT8
<a name="line-10868"></a>      type: DT_QUINT8
<a name="line-10869"></a>      type: DT_QINT32
<a name="line-10870"></a>      type: DT_HALF
<a name="line-10871"></a>    }
<a name="line-10872"></a>  }
<a name="line-10873"></a>  name: "T"
<a name="line-10874"></a>  type: "type"
<a name="line-10875"></a>}
<a name="line-10876"></a>attr {
<a name="line-10877"></a>  allowed_values {
<a name="line-10878"></a>    list {
<a name="line-10879"></a>      type: DT_FLOAT
<a name="line-10880"></a>      type: DT_DOUBLE
<a name="line-10881"></a>      type: DT_INT32
<a name="line-10882"></a>      type: DT_INT64
<a name="line-10883"></a>      type: DT_UINT8
<a name="line-10884"></a>      type: DT_INT16
<a name="line-10885"></a>      type: DT_INT8
<a name="line-10886"></a>      type: DT_UINT16
<a name="line-10887"></a>      type: DT_HALF
<a name="line-10888"></a>    }
<a name="line-10889"></a>  }
<a name="line-10890"></a>  name: "Treal"
<a name="line-10891"></a>  type: "type"
<a name="line-10892"></a>}
<a name="line-10893"></a>input_arg {
<a name="line-10894"></a>  description: "2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix."
<a name="line-10895"></a>  name: "a_indices"
<a name="line-10896"></a>  type: DT_INT64
<a name="line-10897"></a>}
<a name="line-10898"></a>input_arg {
<a name="line-10899"></a>  description: "1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector."
<a name="line-10900"></a>  name: "a_values"
<a name="line-10901"></a>  type_attr: "T"
<a name="line-10902"></a>}
<a name="line-10903"></a>input_arg {
<a name="line-10904"></a>  description: "1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector."
<a name="line-10905"></a>  name: "a_shape"
<a name="line-10906"></a>  type: DT_INT64
<a name="line-10907"></a>}
<a name="line-10908"></a>input_arg {
<a name="line-10909"></a>  description: "2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix."
<a name="line-10910"></a>  name: "b_indices"
<a name="line-10911"></a>  type: DT_INT64
<a name="line-10912"></a>}
<a name="line-10913"></a>input_arg {
<a name="line-10914"></a>  description: "1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector."
<a name="line-10915"></a>  name: "b_values"
<a name="line-10916"></a>  type_attr: "T"
<a name="line-10917"></a>}
<a name="line-10918"></a>input_arg {
<a name="line-10919"></a>  description: "1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector."
<a name="line-10920"></a>  name: "b_shape"
<a name="line-10921"></a>  type: DT_INT64
<a name="line-10922"></a>}
<a name="line-10923"></a>input_arg {
<a name="line-10924"></a>  description: "0-D.  The magnitude threshold that determines if an output value/index\npair takes space."
<a name="line-10925"></a>  name: "thresh"
<a name="line-10926"></a>  type_attr: "Treal"
<a name="line-10927"></a>}
<a name="line-10928"></a>output_arg { name: "sum_indices" type: DT_INT64 }
<a name="line-10929"></a>output_arg { name: "sum_values" type_attr: "T" }
<a name="line-10930"></a>output_arg { name: "sum_shape" type: DT_INT64 }
<a name="line-10931"></a>-}</span>
<a name="line-10932"></a>
<a name="line-10933"></a><a name="sparseApplyCenteredRMSProp"></a><span class='hs-comment'>-- | Update '*var' according to the centered RMSProp algorithm.</span>
<a name="line-10934"></a><span class='hs-comment'>--</span>
<a name="line-10935"></a><span class='hs-comment'>-- The centered RMSProp algorithm uses an estimate of the centered second moment</span>
<a name="line-10936"></a><span class='hs-comment'>-- (i.e., the variance) for normalization, as opposed to regular RMSProp, which</span>
<a name="line-10937"></a><span class='hs-comment'>-- uses the (uncentered) second moment. This often helps with training, but is</span>
<a name="line-10938"></a><span class='hs-comment'>-- slightly more expensive in terms of computation and memory.</span>
<a name="line-10939"></a><span class='hs-comment'>-- </span>
<a name="line-10940"></a><span class='hs-comment'>-- Note that in dense implementation of this algorithm, mg, ms, and mom will</span>
<a name="line-10941"></a><span class='hs-comment'>-- update even if the grad is zero, but in this sparse implementation, mg, ms,</span>
<a name="line-10942"></a><span class='hs-comment'>-- and mom will not update in iterations during which the grad is zero.</span>
<a name="line-10943"></a><span class='hs-comment'>-- </span>
<a name="line-10944"></a><span class='hs-comment'>-- mean_square = decay * mean_square + (1-decay) * gradient ** 2</span>
<a name="line-10945"></a><span class='hs-comment'>-- mean_grad = decay * mean_grad + (1-decay) * gradient</span>
<a name="line-10946"></a><span class='hs-comment'>-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)</span>
<a name="line-10947"></a><span class='hs-comment'>-- </span>
<a name="line-10948"></a><span class='hs-comment'>-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad</span>
<a name="line-10949"></a><span class='hs-comment'>-- mom &lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)</span>
<a name="line-10950"></a><span class='hs-comment'>-- var &lt;- var - mom</span>
<a name="line-10951"></a><span class='hs-definition'>sparseApplyCenteredRMSProp</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>v10</span> <span class='hs-varid'>t</span>
<a name="line-10952"></a>                              <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-10953"></a>                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-10954"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-10955"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-10956"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-10957"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-10958"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-10959"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-10960"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-10961"></a>                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-10962"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-10963"></a>                              <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-10964"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __mg__: Should be from a Variable().</span>
<a name="line-10965"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ms__: Should be from a Variable().</span>
<a name="line-10966"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __mom__: Should be from a Variable().</span>
<a name="line-10967"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-10968"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rho__: Decay rate. Must be a scalar.</span>
<a name="line-10969"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __momentum__</span>
<a name="line-10970"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __epsilon__: Ridge term. Must be a scalar.</span>
<a name="line-10971"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-10972"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v10</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.</span>
<a name="line-10973"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-10974"></a><span class='hs-definition'>sparseApplyCenteredRMSProp</span> <span class='hs-varid'>var</span> <span class='hs-varid'>mg</span> <span class='hs-varid'>ms</span> <span class='hs-varid'>mom</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>momentum</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span>
<a name="line-10975"></a>                           <span class='hs-varid'>indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-10976"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseApplyCenteredRMSProp"</span>
<a name="line-10977"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-10978"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-10979"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>mg</span> <span class='hs-varid'>ms</span> <span class='hs-varid'>mom</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>momentum</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span>
<a name="line-10980"></a><span class='hs-comment'>{-
<a name="line-10981"></a>attr {
<a name="line-10982"></a>  allowed_values {
<a name="line-10983"></a>    list {
<a name="line-10984"></a>      type: DT_FLOAT
<a name="line-10985"></a>      type: DT_DOUBLE
<a name="line-10986"></a>      type: DT_INT64
<a name="line-10987"></a>      type: DT_INT32
<a name="line-10988"></a>      type: DT_UINT8
<a name="line-10989"></a>      type: DT_UINT16
<a name="line-10990"></a>      type: DT_INT16
<a name="line-10991"></a>      type: DT_INT8
<a name="line-10992"></a>      type: DT_COMPLEX64
<a name="line-10993"></a>      type: DT_COMPLEX128
<a name="line-10994"></a>      type: DT_QINT8
<a name="line-10995"></a>      type: DT_QUINT8
<a name="line-10996"></a>      type: DT_QINT32
<a name="line-10997"></a>      type: DT_HALF
<a name="line-10998"></a>    }
<a name="line-10999"></a>  }
<a name="line-11000"></a>  name: "T"
<a name="line-11001"></a>  type: "type"
<a name="line-11002"></a>}
<a name="line-11003"></a>attr {
<a name="line-11004"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-11005"></a>  name: "Tindices"
<a name="line-11006"></a>  type: "type"
<a name="line-11007"></a>}
<a name="line-11008"></a>attr {
<a name="line-11009"></a>  default_value { b: false }
<a name="line-11010"></a>  description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-11011"></a>  name: "use_locking"
<a name="line-11012"></a>  type: "bool"
<a name="line-11013"></a>}
<a name="line-11014"></a>input_arg {
<a name="line-11015"></a>  description: "Should be from a Variable()."
<a name="line-11016"></a>  is_ref: true
<a name="line-11017"></a>  name: "var"
<a name="line-11018"></a>  type_attr: "T"
<a name="line-11019"></a>}
<a name="line-11020"></a>input_arg {
<a name="line-11021"></a>  description: "Should be from a Variable()."
<a name="line-11022"></a>  is_ref: true
<a name="line-11023"></a>  name: "mg"
<a name="line-11024"></a>  type_attr: "T"
<a name="line-11025"></a>}
<a name="line-11026"></a>input_arg {
<a name="line-11027"></a>  description: "Should be from a Variable()."
<a name="line-11028"></a>  is_ref: true
<a name="line-11029"></a>  name: "ms"
<a name="line-11030"></a>  type_attr: "T"
<a name="line-11031"></a>}
<a name="line-11032"></a>input_arg {
<a name="line-11033"></a>  description: "Should be from a Variable()."
<a name="line-11034"></a>  is_ref: true
<a name="line-11035"></a>  name: "mom"
<a name="line-11036"></a>  type_attr: "T"
<a name="line-11037"></a>}
<a name="line-11038"></a>input_arg {
<a name="line-11039"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-11040"></a>  name: "lr"
<a name="line-11041"></a>  type_attr: "T"
<a name="line-11042"></a>}
<a name="line-11043"></a>input_arg {
<a name="line-11044"></a>  description: "Decay rate. Must be a scalar."
<a name="line-11045"></a>  name: "rho"
<a name="line-11046"></a>  type_attr: "T"
<a name="line-11047"></a>}
<a name="line-11048"></a>input_arg { name: "momentum" type_attr: "T" }
<a name="line-11049"></a>input_arg {
<a name="line-11050"></a>  description: "Ridge term. Must be a scalar."
<a name="line-11051"></a>  name: "epsilon"
<a name="line-11052"></a>  type_attr: "T"
<a name="line-11053"></a>}
<a name="line-11054"></a>input_arg {
<a name="line-11055"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-11056"></a>}
<a name="line-11057"></a>input_arg {
<a name="line-11058"></a>  description: "A vector of indices into the first dimension of var, ms and mom."
<a name="line-11059"></a>  name: "indices"
<a name="line-11060"></a>  type_attr: "Tindices"
<a name="line-11061"></a>}
<a name="line-11062"></a>output_arg {
<a name="line-11063"></a>  description: "Same as \"var\"."
<a name="line-11064"></a>  is_ref: true
<a name="line-11065"></a>  name: "out"
<a name="line-11066"></a>  type_attr: "T"
<a name="line-11067"></a>}
<a name="line-11068"></a>-}</span>
<a name="line-11069"></a>
<a name="line-11070"></a><span class='hs-comment'>-- | Add all input tensors element wise.</span>
<a name="line-11071"></a>
<a name="line-11072"></a><a name="addN"></a><span class='hs-definition'>addN</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11073"></a>                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11074"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-11075"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-11076"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-11077"></a>                                             <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-11078"></a>        <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __inputs__: Must all be the same size and shape.</span>
<a name="line-11079"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __sum__</span>
<a name="line-11080"></a><span class='hs-definition'>addN</span> <span class='hs-varid'>inputs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"inputs"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11081"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AddN"</span>
<a name="line-11082"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-11083"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-11084"></a>        <span class='hs-varid'>inputs</span>
<a name="line-11085"></a>  <span class='hs-keyword'>where</span>
<a name="line-11086"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-11087"></a><span class='hs-comment'>{-
<a name="line-11088"></a>attr { has_minimum: true minimum: 1 name: "N" type: "int" }
<a name="line-11089"></a>attr {
<a name="line-11090"></a>  allowed_values {
<a name="line-11091"></a>    list {
<a name="line-11092"></a>      type: DT_FLOAT
<a name="line-11093"></a>      type: DT_DOUBLE
<a name="line-11094"></a>      type: DT_INT64
<a name="line-11095"></a>      type: DT_INT32
<a name="line-11096"></a>      type: DT_UINT8
<a name="line-11097"></a>      type: DT_UINT16
<a name="line-11098"></a>      type: DT_INT16
<a name="line-11099"></a>      type: DT_INT8
<a name="line-11100"></a>      type: DT_COMPLEX64
<a name="line-11101"></a>      type: DT_COMPLEX128
<a name="line-11102"></a>      type: DT_QINT8
<a name="line-11103"></a>      type: DT_QUINT8
<a name="line-11104"></a>      type: DT_QINT32
<a name="line-11105"></a>      type: DT_HALF
<a name="line-11106"></a>    }
<a name="line-11107"></a>  }
<a name="line-11108"></a>  name: "T"
<a name="line-11109"></a>  type: "type"
<a name="line-11110"></a>}
<a name="line-11111"></a>input_arg {
<a name="line-11112"></a>  description: "Must all be the same size and shape."
<a name="line-11113"></a>  name: "inputs"
<a name="line-11114"></a>  number_attr: "N"
<a name="line-11115"></a>  type_attr: "T"
<a name="line-11116"></a>}
<a name="line-11117"></a>output_arg { name: "sum" type_attr: "T" }
<a name="line-11118"></a>-}</span>
<a name="line-11119"></a>
<a name="line-11120"></a><a name="concatOffset"></a><span class='hs-comment'>-- | Computes offsets of concat inputs within its output.</span>
<a name="line-11121"></a><span class='hs-comment'>--</span>
<a name="line-11122"></a><span class='hs-comment'>-- For example:</span>
<a name="line-11123"></a><span class='hs-comment'>-- </span>
<a name="line-11124"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-11125"></a><span class='hs-comment'>-- # 'x' is [2, 2, 7]</span>
<a name="line-11126"></a><span class='hs-comment'>-- # 'y' is [2, 3, 7]</span>
<a name="line-11127"></a><span class='hs-comment'>-- # 'z' is [2, 5, 7]</span>
<a name="line-11128"></a><span class='hs-comment'>-- concat_offset(2, [x, y, z]) =&gt; [0, 0, 0], [0, 2, 0], [0, 5, 0]</span>
<a name="line-11129"></a><span class='hs-comment'>-- ```</span>
<a name="line-11130"></a><span class='hs-definition'>concatOffset</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __concat_dim__: The dimension along which to concatenate.</span>
<a name="line-11131"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __shape__: The `N` int32 vectors representing shape of tensors being concatenated.</span>
<a name="line-11132"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __offset__: The `N` int32 vectors representing the starting offset</span>
<a name="line-11133"></a>                <span class='hs-comment'>--         of input tensors within the concatenated output.</span>
<a name="line-11134"></a>                <span class='hs-comment'>-- </span>
<a name="line-11135"></a>                <span class='hs-comment'>-- This is typically used by gradient computations for a concat operation.</span>
<a name="line-11136"></a><span class='hs-definition'>concatOffset</span> <span class='hs-varid'>concat_dim</span>
<a name="line-11137"></a>             <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"shape"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>shape</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11138"></a>    <span class='hs-varid'>buildListOp</span> <span class='hs-keyglyph'>[</span><span class='hs-varid'>n</span><span class='hs-keyglyph'>]</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ConcatOffset"</span>
<a name="line-11139"></a>                     <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-11140"></a>        <span class='hs-varid'>concat_dim</span> <span class='hs-varid'>shape</span>
<a name="line-11141"></a>  <span class='hs-keyword'>where</span>
<a name="line-11142"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>shape</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-11143"></a><span class='hs-comment'>{-
<a name="line-11144"></a>attr { has_minimum: true minimum: 2 name: "N" type: "int" }
<a name="line-11145"></a>input_arg {
<a name="line-11146"></a>  description: "The dimension along which to concatenate."
<a name="line-11147"></a>  name: "concat_dim"
<a name="line-11148"></a>  type: DT_INT32
<a name="line-11149"></a>}
<a name="line-11150"></a>input_arg {
<a name="line-11151"></a>  description: "The `N` int32 vectors representing shape of tensors being concatenated."
<a name="line-11152"></a>  name: "shape"
<a name="line-11153"></a>  number_attr: "N"
<a name="line-11154"></a>  type: DT_INT32
<a name="line-11155"></a>}
<a name="line-11156"></a>output_arg {
<a name="line-11157"></a>  description: "The `N` int32 vectors representing the starting offset\n        of input tensors within the concatenated output.\n\nThis is typically used by gradient computations for a concat operation."
<a name="line-11158"></a>  name: "offset"
<a name="line-11159"></a>  number_attr: "N"
<a name="line-11160"></a>  type: DT_INT32
<a name="line-11161"></a>}
<a name="line-11162"></a>-}</span>
<a name="line-11163"></a>
<a name="line-11164"></a><span class='hs-comment'>-- | Concatenates tensors along one dimension.</span>
<a name="line-11165"></a>
<a name="line-11166"></a><a name="concatV2"></a><span class='hs-definition'>concatV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-11167"></a>                                   <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-11168"></a>                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-11169"></a>            <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __values__: List of `N` Tensors to concatenate. Their ranks and types must match,</span>
<a name="line-11170"></a>                          <span class='hs-comment'>-- and their sizes must match in all dimensions except `concat_dim`.</span>
<a name="line-11171"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __axis__: 0-D.  The dimension along which to concatenate.  Must be in the</span>
<a name="line-11172"></a>                              <span class='hs-comment'>-- range [0, rank(values)).</span>
<a name="line-11173"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: A `Tensor` with the concatenation of values stacked along the</span>
<a name="line-11174"></a>            <span class='hs-comment'>-- `concat_dim` dimension.  This tensor's shape matches that of `values` except</span>
<a name="line-11175"></a>            <span class='hs-comment'>-- in `concat_dim` where it has the sum of the sizes.</span>
<a name="line-11176"></a><span class='hs-definition'>concatV2</span> <span class='hs-varid'>values</span> <span class='hs-varid'>axis</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"values"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>values</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11177"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ConcatV2"</span>
<a name="line-11178"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-11179"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span>
<a name="line-11180"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-11181"></a>        <span class='hs-varid'>values</span> <span class='hs-varid'>axis</span>
<a name="line-11182"></a>  <span class='hs-keyword'>where</span>
<a name="line-11183"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>values</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-11184"></a><span class='hs-comment'>{-
<a name="line-11185"></a>attr { has_minimum: true minimum: 2 name: "N" type: "int" }
<a name="line-11186"></a>attr { name: "T" type: "type" }
<a name="line-11187"></a>attr {
<a name="line-11188"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-11189"></a>  default_value { type: DT_INT32 }
<a name="line-11190"></a>  name: "Tidx"
<a name="line-11191"></a>  type: "type"
<a name="line-11192"></a>}
<a name="line-11193"></a>input_arg {
<a name="line-11194"></a>  description: "List of `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
<a name="line-11195"></a>  name: "values"
<a name="line-11196"></a>  number_attr: "N"
<a name="line-11197"></a>  type_attr: "T"
<a name="line-11198"></a>}
<a name="line-11199"></a>input_arg {
<a name="line-11200"></a>  description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [0, rank(values))."
<a name="line-11201"></a>  name: "axis"
<a name="line-11202"></a>  type_attr: "Tidx"
<a name="line-11203"></a>}
<a name="line-11204"></a>output_arg {
<a name="line-11205"></a>  description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
<a name="line-11206"></a>  name: "output"
<a name="line-11207"></a>  type_attr: "T"
<a name="line-11208"></a>}
<a name="line-11209"></a>-}</span>
<a name="line-11210"></a>
<a name="line-11211"></a><span class='hs-comment'>-- | Returns a tensor of zeros with the same shape and type as x.</span>
<a name="line-11212"></a>
<a name="line-11213"></a><a name="zerosLike"></a><span class='hs-definition'>zerosLike</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-11214"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__: a tensor of type T.</span>
<a name="line-11215"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__: a tensor of the same shape and type as x but filled with zeros.</span>
<a name="line-11216"></a><span class='hs-definition'>zerosLike</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11217"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ZerosLike"</span>
<a name="line-11218"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-11219"></a>        <span class='hs-varid'>x</span>
<a name="line-11220"></a><span class='hs-comment'>{-
<a name="line-11221"></a>attr { name: "T" type: "type" }
<a name="line-11222"></a>input_arg {
<a name="line-11223"></a>  description: "a tensor of type T." name: "x" type_attr: "T"
<a name="line-11224"></a>}
<a name="line-11225"></a>output_arg {
<a name="line-11226"></a>  description: "a tensor of the same shape and type as x but filled with zeros."
<a name="line-11227"></a>  name: "y"
<a name="line-11228"></a>  type_attr: "T"
<a name="line-11229"></a>}
<a name="line-11230"></a>-}</span>
<a name="line-11231"></a>
<a name="line-11232"></a><a name="applyCenteredRMSProp"></a><span class='hs-comment'>-- | Update '*var' according to the centered RMSProp algorithm.</span>
<a name="line-11233"></a><span class='hs-comment'>--</span>
<a name="line-11234"></a><span class='hs-comment'>-- The centered RMSProp algorithm uses an estimate of the centered second moment</span>
<a name="line-11235"></a><span class='hs-comment'>-- (i.e., the variance) for normalization, as opposed to regular RMSProp, which</span>
<a name="line-11236"></a><span class='hs-comment'>-- uses the (uncentered) second moment. This often helps with training, but is</span>
<a name="line-11237"></a><span class='hs-comment'>-- slightly more expensive in terms of computation and memory.</span>
<a name="line-11238"></a><span class='hs-comment'>-- </span>
<a name="line-11239"></a><span class='hs-comment'>-- Note that in dense implementation of this algorithm, mg, ms, and mom will</span>
<a name="line-11240"></a><span class='hs-comment'>-- update even if the grad is zero, but in this sparse implementation, mg, ms,</span>
<a name="line-11241"></a><span class='hs-comment'>-- and mom will not update in iterations during which the grad is zero.</span>
<a name="line-11242"></a><span class='hs-comment'>-- </span>
<a name="line-11243"></a><span class='hs-comment'>-- mean_square = decay * mean_square + (1-decay) * gradient ** 2</span>
<a name="line-11244"></a><span class='hs-comment'>-- mean_grad = decay * mean_grad + (1-decay) * gradient</span>
<a name="line-11245"></a><span class='hs-comment'>-- </span>
<a name="line-11246"></a><span class='hs-comment'>-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)</span>
<a name="line-11247"></a><span class='hs-comment'>-- </span>
<a name="line-11248"></a><span class='hs-comment'>-- mg &lt;- rho * mg_{t-1} + (1-rho) * grad</span>
<a name="line-11249"></a><span class='hs-comment'>-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad</span>
<a name="line-11250"></a><span class='hs-comment'>-- mom &lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)</span>
<a name="line-11251"></a><span class='hs-comment'>-- var &lt;- var - mom</span>
<a name="line-11252"></a><span class='hs-definition'>applyCenteredRMSProp</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-11253"></a>                                                   <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11254"></a>                                                           <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11255"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-11256"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-11257"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-11258"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-11259"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-11260"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-11261"></a>                                                           <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-11262"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-11263"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __mg__: Should be from a Variable().</span>
<a name="line-11264"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ms__: Should be from a Variable().</span>
<a name="line-11265"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __mom__: Should be from a Variable().</span>
<a name="line-11266"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-11267"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rho__: Decay rate. Must be a scalar.</span>
<a name="line-11268"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __momentum__</span>
<a name="line-11269"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __epsilon__: Ridge term. Must be a scalar.</span>
<a name="line-11270"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-11271"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-11272"></a><span class='hs-definition'>applyCenteredRMSProp</span> <span class='hs-varid'>var</span> <span class='hs-varid'>mg</span> <span class='hs-varid'>ms</span> <span class='hs-varid'>mom</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>momentum</span> <span class='hs-varid'>epsilon</span>
<a name="line-11273"></a>                     <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11274"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyCenteredRMSProp"</span>
<a name="line-11275"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-11276"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>mg</span> <span class='hs-varid'>ms</span> <span class='hs-varid'>mom</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>momentum</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span>
<a name="line-11277"></a><span class='hs-comment'>{-
<a name="line-11278"></a>attr {
<a name="line-11279"></a>  allowed_values {
<a name="line-11280"></a>    list {
<a name="line-11281"></a>      type: DT_FLOAT
<a name="line-11282"></a>      type: DT_DOUBLE
<a name="line-11283"></a>      type: DT_INT64
<a name="line-11284"></a>      type: DT_INT32
<a name="line-11285"></a>      type: DT_UINT8
<a name="line-11286"></a>      type: DT_UINT16
<a name="line-11287"></a>      type: DT_INT16
<a name="line-11288"></a>      type: DT_INT8
<a name="line-11289"></a>      type: DT_COMPLEX64
<a name="line-11290"></a>      type: DT_COMPLEX128
<a name="line-11291"></a>      type: DT_QINT8
<a name="line-11292"></a>      type: DT_QUINT8
<a name="line-11293"></a>      type: DT_QINT32
<a name="line-11294"></a>      type: DT_HALF
<a name="line-11295"></a>    }
<a name="line-11296"></a>  }
<a name="line-11297"></a>  name: "T"
<a name="line-11298"></a>  type: "type"
<a name="line-11299"></a>}
<a name="line-11300"></a>attr {
<a name="line-11301"></a>  default_value { b: false }
<a name="line-11302"></a>  description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-11303"></a>  name: "use_locking"
<a name="line-11304"></a>  type: "bool"
<a name="line-11305"></a>}
<a name="line-11306"></a>input_arg {
<a name="line-11307"></a>  description: "Should be from a Variable()."
<a name="line-11308"></a>  is_ref: true
<a name="line-11309"></a>  name: "var"
<a name="line-11310"></a>  type_attr: "T"
<a name="line-11311"></a>}
<a name="line-11312"></a>input_arg {
<a name="line-11313"></a>  description: "Should be from a Variable()."
<a name="line-11314"></a>  is_ref: true
<a name="line-11315"></a>  name: "mg"
<a name="line-11316"></a>  type_attr: "T"
<a name="line-11317"></a>}
<a name="line-11318"></a>input_arg {
<a name="line-11319"></a>  description: "Should be from a Variable()."
<a name="line-11320"></a>  is_ref: true
<a name="line-11321"></a>  name: "ms"
<a name="line-11322"></a>  type_attr: "T"
<a name="line-11323"></a>}
<a name="line-11324"></a>input_arg {
<a name="line-11325"></a>  description: "Should be from a Variable()."
<a name="line-11326"></a>  is_ref: true
<a name="line-11327"></a>  name: "mom"
<a name="line-11328"></a>  type_attr: "T"
<a name="line-11329"></a>}
<a name="line-11330"></a>input_arg {
<a name="line-11331"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-11332"></a>  name: "lr"
<a name="line-11333"></a>  type_attr: "T"
<a name="line-11334"></a>}
<a name="line-11335"></a>input_arg {
<a name="line-11336"></a>  description: "Decay rate. Must be a scalar."
<a name="line-11337"></a>  name: "rho"
<a name="line-11338"></a>  type_attr: "T"
<a name="line-11339"></a>}
<a name="line-11340"></a>input_arg { name: "momentum" type_attr: "T" }
<a name="line-11341"></a>input_arg {
<a name="line-11342"></a>  description: "Ridge term. Must be a scalar."
<a name="line-11343"></a>  name: "epsilon"
<a name="line-11344"></a>  type_attr: "T"
<a name="line-11345"></a>}
<a name="line-11346"></a>input_arg {
<a name="line-11347"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-11348"></a>}
<a name="line-11349"></a>output_arg {
<a name="line-11350"></a>  description: "Same as \"var\"."
<a name="line-11351"></a>  is_ref: true
<a name="line-11352"></a>  name: "out"
<a name="line-11353"></a>  type_attr: "T"
<a name="line-11354"></a>}
<a name="line-11355"></a>-}</span>
<a name="line-11356"></a>
<a name="line-11357"></a><a name="applyRMSProp"></a><span class='hs-comment'>-- | Update '*var' according to the RMSProp algorithm.</span>
<a name="line-11358"></a><span class='hs-comment'>--</span>
<a name="line-11359"></a><span class='hs-comment'>-- Note that in dense implementation of this algorithm, ms and mom will</span>
<a name="line-11360"></a><span class='hs-comment'>-- update even if the grad is zero, but in this sparse implementation, ms</span>
<a name="line-11361"></a><span class='hs-comment'>-- and mom will not update in iterations during which the grad is zero.</span>
<a name="line-11362"></a><span class='hs-comment'>-- </span>
<a name="line-11363"></a><span class='hs-comment'>-- mean_square = decay * mean_square + (1-decay) * gradient ** 2</span>
<a name="line-11364"></a><span class='hs-comment'>-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)</span>
<a name="line-11365"></a><span class='hs-comment'>-- </span>
<a name="line-11366"></a><span class='hs-comment'>-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad</span>
<a name="line-11367"></a><span class='hs-comment'>-- mom &lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)</span>
<a name="line-11368"></a><span class='hs-comment'>-- var &lt;- var - mom</span>
<a name="line-11369"></a><span class='hs-definition'>applyRMSProp</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-11370"></a>                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11371"></a>                                                   <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11372"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-11373"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-11374"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-11375"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-11376"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-11377"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-11378"></a>                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-11379"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-11380"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ms__: Should be from a Variable().</span>
<a name="line-11381"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __mom__: Should be from a Variable().</span>
<a name="line-11382"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-11383"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rho__: Decay rate. Must be a scalar.</span>
<a name="line-11384"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __momentum__</span>
<a name="line-11385"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __epsilon__: Ridge term. Must be a scalar.</span>
<a name="line-11386"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-11387"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-11388"></a><span class='hs-definition'>applyRMSProp</span> <span class='hs-varid'>var</span> <span class='hs-varid'>ms</span> <span class='hs-varid'>mom</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>momentum</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11389"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyRMSProp"</span>
<a name="line-11390"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-11391"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>ms</span> <span class='hs-varid'>mom</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>momentum</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span>
<a name="line-11392"></a><span class='hs-comment'>{-
<a name="line-11393"></a>attr {
<a name="line-11394"></a>  allowed_values {
<a name="line-11395"></a>    list {
<a name="line-11396"></a>      type: DT_FLOAT
<a name="line-11397"></a>      type: DT_DOUBLE
<a name="line-11398"></a>      type: DT_INT64
<a name="line-11399"></a>      type: DT_INT32
<a name="line-11400"></a>      type: DT_UINT8
<a name="line-11401"></a>      type: DT_UINT16
<a name="line-11402"></a>      type: DT_INT16
<a name="line-11403"></a>      type: DT_INT8
<a name="line-11404"></a>      type: DT_COMPLEX64
<a name="line-11405"></a>      type: DT_COMPLEX128
<a name="line-11406"></a>      type: DT_QINT8
<a name="line-11407"></a>      type: DT_QUINT8
<a name="line-11408"></a>      type: DT_QINT32
<a name="line-11409"></a>      type: DT_HALF
<a name="line-11410"></a>    }
<a name="line-11411"></a>  }
<a name="line-11412"></a>  name: "T"
<a name="line-11413"></a>  type: "type"
<a name="line-11414"></a>}
<a name="line-11415"></a>attr {
<a name="line-11416"></a>  default_value { b: false }
<a name="line-11417"></a>  description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-11418"></a>  name: "use_locking"
<a name="line-11419"></a>  type: "bool"
<a name="line-11420"></a>}
<a name="line-11421"></a>input_arg {
<a name="line-11422"></a>  description: "Should be from a Variable()."
<a name="line-11423"></a>  is_ref: true
<a name="line-11424"></a>  name: "var"
<a name="line-11425"></a>  type_attr: "T"
<a name="line-11426"></a>}
<a name="line-11427"></a>input_arg {
<a name="line-11428"></a>  description: "Should be from a Variable()."
<a name="line-11429"></a>  is_ref: true
<a name="line-11430"></a>  name: "ms"
<a name="line-11431"></a>  type_attr: "T"
<a name="line-11432"></a>}
<a name="line-11433"></a>input_arg {
<a name="line-11434"></a>  description: "Should be from a Variable()."
<a name="line-11435"></a>  is_ref: true
<a name="line-11436"></a>  name: "mom"
<a name="line-11437"></a>  type_attr: "T"
<a name="line-11438"></a>}
<a name="line-11439"></a>input_arg {
<a name="line-11440"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-11441"></a>  name: "lr"
<a name="line-11442"></a>  type_attr: "T"
<a name="line-11443"></a>}
<a name="line-11444"></a>input_arg {
<a name="line-11445"></a>  description: "Decay rate. Must be a scalar."
<a name="line-11446"></a>  name: "rho"
<a name="line-11447"></a>  type_attr: "T"
<a name="line-11448"></a>}
<a name="line-11449"></a>input_arg { name: "momentum" type_attr: "T" }
<a name="line-11450"></a>input_arg {
<a name="line-11451"></a>  description: "Ridge term. Must be a scalar."
<a name="line-11452"></a>  name: "epsilon"
<a name="line-11453"></a>  type_attr: "T"
<a name="line-11454"></a>}
<a name="line-11455"></a>input_arg {
<a name="line-11456"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-11457"></a>}
<a name="line-11458"></a>output_arg {
<a name="line-11459"></a>  description: "Same as \"var\"."
<a name="line-11460"></a>  is_ref: true
<a name="line-11461"></a>  name: "out"
<a name="line-11462"></a>  type_attr: "T"
<a name="line-11463"></a>}
<a name="line-11464"></a>-}</span>
<a name="line-11465"></a>
<a name="line-11466"></a><a name="assignAddVariableOp"></a><span class='hs-comment'>-- | Adds a value to the current value of a variable.</span>
<a name="line-11467"></a><span class='hs-comment'>--</span>
<a name="line-11468"></a><span class='hs-comment'>-- Any ReadVariableOp which depends directly or indirectly on this assign is</span>
<a name="line-11469"></a><span class='hs-comment'>-- guaranteed to see the incremented value or a subsequent newer one.</span>
<a name="line-11470"></a><span class='hs-comment'>-- </span>
<a name="line-11471"></a><span class='hs-comment'>-- Outputs the incremented value, which can be used to totally order the</span>
<a name="line-11472"></a><span class='hs-comment'>-- increments to this variable.</span>
<a name="line-11473"></a><span class='hs-definition'>assignAddVariableOp</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-11474"></a>                       <span class='hs-conid'>ResourceHandle</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __resource__: handle to the resource in which to store the variable.</span>
<a name="line-11475"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __value__: the value by which the variable will be incremented.</span>
<a name="line-11476"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-11477"></a><span class='hs-definition'>assignAddVariableOp</span> <span class='hs-varid'>resource</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11478"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AssignAddVariableOp"</span>
<a name="line-11479"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-11480"></a>        <span class='hs-varid'>resource</span> <span class='hs-varid'>value</span>
<a name="line-11481"></a><span class='hs-comment'>{-
<a name="line-11482"></a>attr {
<a name="line-11483"></a>  description: "the dtype of the value." name: "dtype" type: "type"
<a name="line-11484"></a>}
<a name="line-11485"></a>input_arg {
<a name="line-11486"></a>  description: "handle to the resource in which to store the variable."
<a name="line-11487"></a>  name: "resource"
<a name="line-11488"></a>  type: DT_RESOURCE
<a name="line-11489"></a>}
<a name="line-11490"></a>input_arg {
<a name="line-11491"></a>  description: "the value by which the variable will be incremented."
<a name="line-11492"></a>  name: "value"
<a name="line-11493"></a>  type_attr: "dtype"
<a name="line-11494"></a>}
<a name="line-11495"></a>-}</span>
<a name="line-11496"></a>
<a name="line-11497"></a><a name="applyAdam"></a><span class='hs-comment'>-- | Update '*var' according to the Adam algorithm.</span>
<a name="line-11498"></a><span class='hs-comment'>--</span>
<a name="line-11499"></a><span class='hs-comment'>-- lr_t &lt;- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)</span>
<a name="line-11500"></a><span class='hs-comment'>-- m_t &lt;- beta1 * m_{t-1} + (1 - beta1) * g_t</span>
<a name="line-11501"></a><span class='hs-comment'>-- v_t &lt;- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t</span>
<a name="line-11502"></a><span class='hs-comment'>-- variable &lt;- variable - lr_t * m_t / (sqrt(v_t) + epsilon)</span>
<a name="line-11503"></a><span class='hs-definition'>applyAdam</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>v10</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-11504"></a>                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11505"></a>                                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11506"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-11507"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-11508"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-11509"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-11510"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-11511"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-11512"></a>                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-11513"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-11514"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __m__: Should be from a Variable().</span>
<a name="line-11515"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __v__: Should be from a Variable().</span>
<a name="line-11516"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __beta1_power__: Must be a scalar.</span>
<a name="line-11517"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __beta2_power__: Must be a scalar.</span>
<a name="line-11518"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-11519"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __beta1__: Momentum factor. Must be a scalar.</span>
<a name="line-11520"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __beta2__: Momentum factor. Must be a scalar.</span>
<a name="line-11521"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __epsilon__: Ridge term. Must be a scalar.</span>
<a name="line-11522"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v10</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-11523"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-11524"></a><span class='hs-definition'>applyAdam</span> <span class='hs-varid'>var</span> <span class='hs-varid'>m</span> <span class='hs-varid'>v</span> <span class='hs-varid'>beta1_power</span> <span class='hs-varid'>beta2_power</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>beta1</span> <span class='hs-varid'>beta2</span> <span class='hs-varid'>epsilon</span>
<a name="line-11525"></a>          <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11526"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyAdam"</span>
<a name="line-11527"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-11528"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>m</span> <span class='hs-varid'>v</span> <span class='hs-varid'>beta1_power</span> <span class='hs-varid'>beta2_power</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>beta1</span> <span class='hs-varid'>beta2</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span>
<a name="line-11529"></a><span class='hs-comment'>{-
<a name="line-11530"></a>attr {
<a name="line-11531"></a>  allowed_values {
<a name="line-11532"></a>    list {
<a name="line-11533"></a>      type: DT_FLOAT
<a name="line-11534"></a>      type: DT_DOUBLE
<a name="line-11535"></a>      type: DT_INT64
<a name="line-11536"></a>      type: DT_INT32
<a name="line-11537"></a>      type: DT_UINT8
<a name="line-11538"></a>      type: DT_UINT16
<a name="line-11539"></a>      type: DT_INT16
<a name="line-11540"></a>      type: DT_INT8
<a name="line-11541"></a>      type: DT_COMPLEX64
<a name="line-11542"></a>      type: DT_COMPLEX128
<a name="line-11543"></a>      type: DT_QINT8
<a name="line-11544"></a>      type: DT_QUINT8
<a name="line-11545"></a>      type: DT_QINT32
<a name="line-11546"></a>      type: DT_HALF
<a name="line-11547"></a>    }
<a name="line-11548"></a>  }
<a name="line-11549"></a>  name: "T"
<a name="line-11550"></a>  type: "type"
<a name="line-11551"></a>}
<a name="line-11552"></a>attr {
<a name="line-11553"></a>  default_value { b: false }
<a name="line-11554"></a>  description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-11555"></a>  name: "use_locking"
<a name="line-11556"></a>  type: "bool"
<a name="line-11557"></a>}
<a name="line-11558"></a>input_arg {
<a name="line-11559"></a>  description: "Should be from a Variable()."
<a name="line-11560"></a>  is_ref: true
<a name="line-11561"></a>  name: "var"
<a name="line-11562"></a>  type_attr: "T"
<a name="line-11563"></a>}
<a name="line-11564"></a>input_arg {
<a name="line-11565"></a>  description: "Should be from a Variable()."
<a name="line-11566"></a>  is_ref: true
<a name="line-11567"></a>  name: "m"
<a name="line-11568"></a>  type_attr: "T"
<a name="line-11569"></a>}
<a name="line-11570"></a>input_arg {
<a name="line-11571"></a>  description: "Should be from a Variable()."
<a name="line-11572"></a>  is_ref: true
<a name="line-11573"></a>  name: "v"
<a name="line-11574"></a>  type_attr: "T"
<a name="line-11575"></a>}
<a name="line-11576"></a>input_arg {
<a name="line-11577"></a>  description: "Must be a scalar." name: "beta1_power" type_attr: "T"
<a name="line-11578"></a>}
<a name="line-11579"></a>input_arg {
<a name="line-11580"></a>  description: "Must be a scalar." name: "beta2_power" type_attr: "T"
<a name="line-11581"></a>}
<a name="line-11582"></a>input_arg {
<a name="line-11583"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-11584"></a>  name: "lr"
<a name="line-11585"></a>  type_attr: "T"
<a name="line-11586"></a>}
<a name="line-11587"></a>input_arg {
<a name="line-11588"></a>  description: "Momentum factor. Must be a scalar."
<a name="line-11589"></a>  name: "beta1"
<a name="line-11590"></a>  type_attr: "T"
<a name="line-11591"></a>}
<a name="line-11592"></a>input_arg {
<a name="line-11593"></a>  description: "Momentum factor. Must be a scalar."
<a name="line-11594"></a>  name: "beta2"
<a name="line-11595"></a>  type_attr: "T"
<a name="line-11596"></a>}
<a name="line-11597"></a>input_arg {
<a name="line-11598"></a>  description: "Ridge term. Must be a scalar."
<a name="line-11599"></a>  name: "epsilon"
<a name="line-11600"></a>  type_attr: "T"
<a name="line-11601"></a>}
<a name="line-11602"></a>input_arg {
<a name="line-11603"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-11604"></a>}
<a name="line-11605"></a>output_arg {
<a name="line-11606"></a>  description: "Same as \"var\"."
<a name="line-11607"></a>  is_ref: true
<a name="line-11608"></a>  name: "out"
<a name="line-11609"></a>  type_attr: "T"
<a name="line-11610"></a>}
<a name="line-11611"></a>-}</span>
<a name="line-11612"></a>
<a name="line-11613"></a><a name="extractGlimpse"></a><span class='hs-comment'>-- | Extracts a glimpse from the input tensor.</span>
<a name="line-11614"></a><span class='hs-comment'>--</span>
<a name="line-11615"></a><span class='hs-comment'>-- Returns a set of windows called glimpses extracted at location</span>
<a name="line-11616"></a><span class='hs-comment'>-- `offsets` from the input tensor. If the windows only partially</span>
<a name="line-11617"></a><span class='hs-comment'>-- overlaps the inputs, the non overlapping areas will be filled with</span>
<a name="line-11618"></a><span class='hs-comment'>-- random noise.</span>
<a name="line-11619"></a><span class='hs-comment'>-- </span>
<a name="line-11620"></a><span class='hs-comment'>-- The result is a 4-D tensor of shape `[batch_size, glimpse_height,</span>
<a name="line-11621"></a><span class='hs-comment'>-- glimpse_width, channels]`. The channels and batch dimensions are the</span>
<a name="line-11622"></a><span class='hs-comment'>-- same as that of the input tensor. The height and width of the output</span>
<a name="line-11623"></a><span class='hs-comment'>-- windows are specified in the `size` parameter.</span>
<a name="line-11624"></a><span class='hs-comment'>-- </span>
<a name="line-11625"></a><span class='hs-comment'>-- The argument `normalized` and `centered` controls how the windows are built:</span>
<a name="line-11626"></a><span class='hs-comment'>-- </span>
<a name="line-11627"></a><span class='hs-comment'>-- * If the coordinates are normalized but not centered, 0.0 and 1.0</span>
<a name="line-11628"></a><span class='hs-comment'>--   correspond to the minimum and maximum of each height and width</span>
<a name="line-11629"></a><span class='hs-comment'>--   dimension.</span>
<a name="line-11630"></a><span class='hs-comment'>-- * If the coordinates are both normalized and centered, they range from</span>
<a name="line-11631"></a><span class='hs-comment'>--   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper</span>
<a name="line-11632"></a><span class='hs-comment'>--   left corner, the lower right corner is located at (1.0, 1.0) and the</span>
<a name="line-11633"></a><span class='hs-comment'>--   center is at (0, 0).</span>
<a name="line-11634"></a><span class='hs-comment'>-- * If the coordinates are not normalized they are interpreted as</span>
<a name="line-11635"></a><span class='hs-comment'>--   numbers of pixels.</span>
<a name="line-11636"></a><span class='hs-definition'>extractGlimpse</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __input__: A 4-D float tensor of shape `[batch_size, height, width, channels]`.</span>
<a name="line-11637"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __size__: A 1-D tensor of 2 elements containing the size of the glimpses</span>
<a name="line-11638"></a>                                              <span class='hs-comment'>-- to extract.  The glimpse height must be specified first, following</span>
<a name="line-11639"></a>                                              <span class='hs-comment'>-- by the glimpse width.</span>
<a name="line-11640"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __offsets__: A 2-D integer tensor of shape `[batch_size, 2]` containing</span>
<a name="line-11641"></a>                                     <span class='hs-comment'>-- the x, y locations of the center of each window.</span>
<a name="line-11642"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __glimpse__: A tensor representing the glimpses `[batch_size,</span>
<a name="line-11643"></a>                  <span class='hs-comment'>-- glimpse_height, glimpse_width, channels]`.</span>
<a name="line-11644"></a><span class='hs-definition'>extractGlimpse</span> <span class='hs-varid'>input</span> <span class='hs-varid'>size</span> <span class='hs-varid'>offsets</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11645"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ExtractGlimpse"</span><span class='hs-layout'>)</span>
<a name="line-11646"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>size</span> <span class='hs-varid'>offsets</span>
<a name="line-11647"></a><span class='hs-comment'>{-
<a name="line-11648"></a>attr {
<a name="line-11649"></a>  default_value { b: true }
<a name="line-11650"></a>  description: "indicates if the offset coordinates are centered relative to\nthe image, in which case the (0, 0) offset is relative to the center\nof the input images. If false, the (0,0) offset corresponds to the\nupper left corner of the input images."
<a name="line-11651"></a>  name: "centered"
<a name="line-11652"></a>  type: "bool"
<a name="line-11653"></a>}
<a name="line-11654"></a>attr {
<a name="line-11655"></a>  default_value { b: true }
<a name="line-11656"></a>  description: "indicates if the offset coordinates are normalized."
<a name="line-11657"></a>  name: "normalized"
<a name="line-11658"></a>  type: "bool"
<a name="line-11659"></a>}
<a name="line-11660"></a>attr {
<a name="line-11661"></a>  default_value { b: true }
<a name="line-11662"></a>  description: "indicates if the noise should be generated using a\nuniform distribution or a Gaussian distribution."
<a name="line-11663"></a>  name: "uniform_noise"
<a name="line-11664"></a>  type: "bool"
<a name="line-11665"></a>}
<a name="line-11666"></a>input_arg {
<a name="line-11667"></a>  description: "A 4-D float tensor of shape `[batch_size, height, width, channels]`."
<a name="line-11668"></a>  name: "input"
<a name="line-11669"></a>  type: DT_FLOAT
<a name="line-11670"></a>}
<a name="line-11671"></a>input_arg {
<a name="line-11672"></a>  description: "A 1-D tensor of 2 elements containing the size of the glimpses\nto extract.  The glimpse height must be specified first, following\nby the glimpse width."
<a name="line-11673"></a>  name: "size"
<a name="line-11674"></a>  type: DT_INT32
<a name="line-11675"></a>}
<a name="line-11676"></a>input_arg {
<a name="line-11677"></a>  description: "A 2-D integer tensor of shape `[batch_size, 2]` containing\nthe x, y locations of the center of each window."
<a name="line-11678"></a>  name: "offsets"
<a name="line-11679"></a>  type: DT_FLOAT
<a name="line-11680"></a>}
<a name="line-11681"></a>output_arg {
<a name="line-11682"></a>  description: "A tensor representing the glimpses `[batch_size,\nglimpse_height, glimpse_width, channels]`."
<a name="line-11683"></a>  name: "glimpse"
<a name="line-11684"></a>  type: DT_FLOAT
<a name="line-11685"></a>}
<a name="line-11686"></a>-}</span>
<a name="line-11687"></a>
<a name="line-11688"></a><a name="sparseApplyMomentum"></a><span class='hs-comment'>-- | Update relevant entries in '*var' and '*accum' according to the momentum scheme.</span>
<a name="line-11689"></a><span class='hs-comment'>--</span>
<a name="line-11690"></a><span class='hs-comment'>-- Set use_nesterov = True if you want to use Nesterov momentum.</span>
<a name="line-11691"></a><span class='hs-comment'>-- </span>
<a name="line-11692"></a><span class='hs-comment'>-- That is for rows we have grad for, we update var and accum as follows:</span>
<a name="line-11693"></a><span class='hs-comment'>-- </span>
<a name="line-11694"></a><span class='hs-comment'>-- accum = accum * momentum + grad</span>
<a name="line-11695"></a><span class='hs-comment'>-- var -= lr * accum</span>
<a name="line-11696"></a><span class='hs-definition'>sparseApplyMomentum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-11697"></a>                                                        <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11698"></a>                                                                <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11699"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-11700"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-11701"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-11702"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-11703"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-11704"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-11705"></a>                                                                <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-11706"></a>                                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-11707"></a>                                                        <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-11708"></a>                                                        <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-11709"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-11710"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-11711"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum__: Should be from a Variable().</span>
<a name="line-11712"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Learning rate. Must be a scalar.</span>
<a name="line-11713"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-11714"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A vector of indices into the first dimension of var and accum.</span>
<a name="line-11715"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __momentum__: Momentum. Must be a scalar.</span>
<a name="line-11716"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-11717"></a><span class='hs-definition'>sparseApplyMomentum</span> <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>momentum</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11718"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseApplyMomentum"</span>
<a name="line-11719"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-11720"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-11721"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>momentum</span>
<a name="line-11722"></a><span class='hs-comment'>{-
<a name="line-11723"></a>attr {
<a name="line-11724"></a>  allowed_values {
<a name="line-11725"></a>    list {
<a name="line-11726"></a>      type: DT_FLOAT
<a name="line-11727"></a>      type: DT_DOUBLE
<a name="line-11728"></a>      type: DT_INT64
<a name="line-11729"></a>      type: DT_INT32
<a name="line-11730"></a>      type: DT_UINT8
<a name="line-11731"></a>      type: DT_UINT16
<a name="line-11732"></a>      type: DT_INT16
<a name="line-11733"></a>      type: DT_INT8
<a name="line-11734"></a>      type: DT_COMPLEX64
<a name="line-11735"></a>      type: DT_COMPLEX128
<a name="line-11736"></a>      type: DT_QINT8
<a name="line-11737"></a>      type: DT_QUINT8
<a name="line-11738"></a>      type: DT_QINT32
<a name="line-11739"></a>      type: DT_HALF
<a name="line-11740"></a>    }
<a name="line-11741"></a>  }
<a name="line-11742"></a>  name: "T"
<a name="line-11743"></a>  type: "type"
<a name="line-11744"></a>}
<a name="line-11745"></a>attr {
<a name="line-11746"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-11747"></a>  name: "Tindices"
<a name="line-11748"></a>  type: "type"
<a name="line-11749"></a>}
<a name="line-11750"></a>attr {
<a name="line-11751"></a>  default_value { b: false }
<a name="line-11752"></a>  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-11753"></a>  name: "use_locking"
<a name="line-11754"></a>  type: "bool"
<a name="line-11755"></a>}
<a name="line-11756"></a>attr {
<a name="line-11757"></a>  default_value { b: false }
<a name="line-11758"></a>  description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
<a name="line-11759"></a>  name: "use_nesterov"
<a name="line-11760"></a>  type: "bool"
<a name="line-11761"></a>}
<a name="line-11762"></a>input_arg {
<a name="line-11763"></a>  description: "Should be from a Variable()."
<a name="line-11764"></a>  is_ref: true
<a name="line-11765"></a>  name: "var"
<a name="line-11766"></a>  type_attr: "T"
<a name="line-11767"></a>}
<a name="line-11768"></a>input_arg {
<a name="line-11769"></a>  description: "Should be from a Variable()."
<a name="line-11770"></a>  is_ref: true
<a name="line-11771"></a>  name: "accum"
<a name="line-11772"></a>  type_attr: "T"
<a name="line-11773"></a>}
<a name="line-11774"></a>input_arg {
<a name="line-11775"></a>  description: "Learning rate. Must be a scalar."
<a name="line-11776"></a>  name: "lr"
<a name="line-11777"></a>  type_attr: "T"
<a name="line-11778"></a>}
<a name="line-11779"></a>input_arg {
<a name="line-11780"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-11781"></a>}
<a name="line-11782"></a>input_arg {
<a name="line-11783"></a>  description: "A vector of indices into the first dimension of var and accum."
<a name="line-11784"></a>  name: "indices"
<a name="line-11785"></a>  type_attr: "Tindices"
<a name="line-11786"></a>}
<a name="line-11787"></a>input_arg {
<a name="line-11788"></a>  description: "Momentum. Must be a scalar."
<a name="line-11789"></a>  name: "momentum"
<a name="line-11790"></a>  type_attr: "T"
<a name="line-11791"></a>}
<a name="line-11792"></a>output_arg {
<a name="line-11793"></a>  description: "Same as \"var\"."
<a name="line-11794"></a>  is_ref: true
<a name="line-11795"></a>  name: "out"
<a name="line-11796"></a>  type_attr: "T"
<a name="line-11797"></a>}
<a name="line-11798"></a>-}</span>
<a name="line-11799"></a>
<a name="line-11800"></a><a name="applyMomentum"></a><span class='hs-comment'>-- | Update '*var' according to the momentum scheme. Set use_nesterov = True if you</span>
<a name="line-11801"></a><span class='hs-comment'>--</span>
<a name="line-11802"></a><span class='hs-comment'>-- want to use Nesterov momentum.</span>
<a name="line-11803"></a><span class='hs-comment'>-- </span>
<a name="line-11804"></a><span class='hs-comment'>-- accum = accum * momentum + grad</span>
<a name="line-11805"></a><span class='hs-comment'>-- var -= lr * accum</span>
<a name="line-11806"></a><span class='hs-definition'>applyMomentum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-11807"></a>                                      <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11808"></a>                                              <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11809"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-11810"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-11811"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-11812"></a>                                              <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-11813"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-11814"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum__: Should be from a Variable().</span>
<a name="line-11815"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-11816"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-11817"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __momentum__: Momentum. Must be a scalar.</span>
<a name="line-11818"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-11819"></a><span class='hs-definition'>applyMomentum</span> <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>momentum</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11820"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyMomentum"</span>
<a name="line-11821"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-11822"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>momentum</span>
<a name="line-11823"></a><span class='hs-comment'>{-
<a name="line-11824"></a>attr {
<a name="line-11825"></a>  allowed_values {
<a name="line-11826"></a>    list {
<a name="line-11827"></a>      type: DT_FLOAT
<a name="line-11828"></a>      type: DT_DOUBLE
<a name="line-11829"></a>      type: DT_INT64
<a name="line-11830"></a>      type: DT_INT32
<a name="line-11831"></a>      type: DT_UINT8
<a name="line-11832"></a>      type: DT_UINT16
<a name="line-11833"></a>      type: DT_INT16
<a name="line-11834"></a>      type: DT_INT8
<a name="line-11835"></a>      type: DT_COMPLEX64
<a name="line-11836"></a>      type: DT_COMPLEX128
<a name="line-11837"></a>      type: DT_QINT8
<a name="line-11838"></a>      type: DT_QUINT8
<a name="line-11839"></a>      type: DT_QINT32
<a name="line-11840"></a>      type: DT_HALF
<a name="line-11841"></a>    }
<a name="line-11842"></a>  }
<a name="line-11843"></a>  name: "T"
<a name="line-11844"></a>  type: "type"
<a name="line-11845"></a>}
<a name="line-11846"></a>attr {
<a name="line-11847"></a>  default_value { b: false }
<a name="line-11848"></a>  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-11849"></a>  name: "use_locking"
<a name="line-11850"></a>  type: "bool"
<a name="line-11851"></a>}
<a name="line-11852"></a>attr {
<a name="line-11853"></a>  default_value { b: false }
<a name="line-11854"></a>  description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
<a name="line-11855"></a>  name: "use_nesterov"
<a name="line-11856"></a>  type: "bool"
<a name="line-11857"></a>}
<a name="line-11858"></a>input_arg {
<a name="line-11859"></a>  description: "Should be from a Variable()."
<a name="line-11860"></a>  is_ref: true
<a name="line-11861"></a>  name: "var"
<a name="line-11862"></a>  type_attr: "T"
<a name="line-11863"></a>}
<a name="line-11864"></a>input_arg {
<a name="line-11865"></a>  description: "Should be from a Variable()."
<a name="line-11866"></a>  is_ref: true
<a name="line-11867"></a>  name: "accum"
<a name="line-11868"></a>  type_attr: "T"
<a name="line-11869"></a>}
<a name="line-11870"></a>input_arg {
<a name="line-11871"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-11872"></a>  name: "lr"
<a name="line-11873"></a>  type_attr: "T"
<a name="line-11874"></a>}
<a name="line-11875"></a>input_arg {
<a name="line-11876"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-11877"></a>}
<a name="line-11878"></a>input_arg {
<a name="line-11879"></a>  description: "Momentum. Must be a scalar."
<a name="line-11880"></a>  name: "momentum"
<a name="line-11881"></a>  type_attr: "T"
<a name="line-11882"></a>}
<a name="line-11883"></a>output_arg {
<a name="line-11884"></a>  description: "Same as \"var\"."
<a name="line-11885"></a>  is_ref: true
<a name="line-11886"></a>  name: "out"
<a name="line-11887"></a>  type_attr: "T"
<a name="line-11888"></a>}
<a name="line-11889"></a>-}</span>
<a name="line-11890"></a>
<a name="line-11891"></a><span class='hs-comment'>-- | A queue that produces elements in first-in first-out order.</span>
<a name="line-11892"></a>
<a name="line-11893"></a><a name="fIFOQueue"></a><span class='hs-definition'>fIFOQueue</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __handle__: The handle to the queue.</span>
<a name="line-11894"></a><span class='hs-definition'>fIFOQueue</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11895"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FIFOQueue"</span><span class='hs-layout'>)</span>
<a name="line-11896"></a>        
<a name="line-11897"></a><span class='hs-comment'>{-
<a name="line-11898"></a>attr {
<a name="line-11899"></a>  description: "The type of each component in a value."
<a name="line-11900"></a>  has_minimum: true
<a name="line-11901"></a>  minimum: 1
<a name="line-11902"></a>  name: "component_types"
<a name="line-11903"></a>  type: "list(type)"
<a name="line-11904"></a>}
<a name="line-11905"></a>attr {
<a name="line-11906"></a>  default_value { list { } }
<a name="line-11907"></a>  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
<a name="line-11908"></a>  has_minimum: true
<a name="line-11909"></a>  name: "shapes"
<a name="line-11910"></a>  type: "list(shape)"
<a name="line-11911"></a>}
<a name="line-11912"></a>attr {
<a name="line-11913"></a>  default_value { i: -1 }
<a name="line-11914"></a>  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
<a name="line-11915"></a>  name: "capacity"
<a name="line-11916"></a>  type: "int"
<a name="line-11917"></a>}
<a name="line-11918"></a>attr {
<a name="line-11919"></a>  default_value { s: "" }
<a name="line-11920"></a>  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
<a name="line-11921"></a>  name: "container"
<a name="line-11922"></a>  type: "string"
<a name="line-11923"></a>}
<a name="line-11924"></a>attr {
<a name="line-11925"></a>  default_value { s: "" }
<a name="line-11926"></a>  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
<a name="line-11927"></a>  name: "shared_name"
<a name="line-11928"></a>  type: "string"
<a name="line-11929"></a>}
<a name="line-11930"></a>output_arg {
<a name="line-11931"></a>  description: "The handle to the queue."
<a name="line-11932"></a>  is_ref: true
<a name="line-11933"></a>  name: "handle"
<a name="line-11934"></a>  type: DT_STRING
<a name="line-11935"></a>}
<a name="line-11936"></a>-}</span>
<a name="line-11937"></a>
<a name="line-11938"></a><a name="sparseApplyFtrl"></a><span class='hs-comment'>-- | Update relevant entries in '*var' according to the Ftrl-proximal scheme.</span>
<a name="line-11939"></a><span class='hs-comment'>--</span>
<a name="line-11940"></a><span class='hs-comment'>-- That is for rows we have grad for, we update var, accum and linear as follows:</span>
<a name="line-11941"></a><span class='hs-comment'>-- accum_new = accum + grad * grad</span>
<a name="line-11942"></a><span class='hs-comment'>-- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var</span>
<a name="line-11943"></a><span class='hs-comment'>-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2</span>
<a name="line-11944"></a><span class='hs-comment'>-- var = (sign(linear) * l1 - linear) / quadratic if |linear| &gt; l1 else 0.0</span>
<a name="line-11945"></a><span class='hs-comment'>-- accum = accum_new</span>
<a name="line-11946"></a><span class='hs-definition'>sparseApplyFtrl</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-11947"></a>                                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11948"></a>                                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-11949"></a>                                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-11950"></a>                                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-11951"></a>                                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-11952"></a>                                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-11953"></a>                                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-11954"></a>                                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-11955"></a>                                                                  <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-11956"></a>                                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-11957"></a>                                                          <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-11958"></a>                                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-11959"></a>                                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-11960"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-11961"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum__: Should be from a Variable().</span>
<a name="line-11962"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __linear__: Should be from a Variable().</span>
<a name="line-11963"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-11964"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A vector of indices into the first dimension of var and accum.</span>
<a name="line-11965"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-11966"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l1__: L1 regularization. Must be a scalar.</span>
<a name="line-11967"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l2__: L2 regularization. Must be a scalar.</span>
<a name="line-11968"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr_power__: Scaling factor. Must be a scalar.</span>
<a name="line-11969"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-11970"></a><span class='hs-definition'>sparseApplyFtrl</span> <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>linear</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span>
<a name="line-11971"></a>                <span class='hs-varid'>lr_power</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-11972"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseApplyFtrl"</span>
<a name="line-11973"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-11974"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-11975"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>linear</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>lr_power</span>
<a name="line-11976"></a><span class='hs-comment'>{-
<a name="line-11977"></a>attr {
<a name="line-11978"></a>  allowed_values {
<a name="line-11979"></a>    list {
<a name="line-11980"></a>      type: DT_FLOAT
<a name="line-11981"></a>      type: DT_DOUBLE
<a name="line-11982"></a>      type: DT_INT64
<a name="line-11983"></a>      type: DT_INT32
<a name="line-11984"></a>      type: DT_UINT8
<a name="line-11985"></a>      type: DT_UINT16
<a name="line-11986"></a>      type: DT_INT16
<a name="line-11987"></a>      type: DT_INT8
<a name="line-11988"></a>      type: DT_COMPLEX64
<a name="line-11989"></a>      type: DT_COMPLEX128
<a name="line-11990"></a>      type: DT_QINT8
<a name="line-11991"></a>      type: DT_QUINT8
<a name="line-11992"></a>      type: DT_QINT32
<a name="line-11993"></a>      type: DT_HALF
<a name="line-11994"></a>    }
<a name="line-11995"></a>  }
<a name="line-11996"></a>  name: "T"
<a name="line-11997"></a>  type: "type"
<a name="line-11998"></a>}
<a name="line-11999"></a>attr {
<a name="line-12000"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-12001"></a>  name: "Tindices"
<a name="line-12002"></a>  type: "type"
<a name="line-12003"></a>}
<a name="line-12004"></a>attr {
<a name="line-12005"></a>  default_value { b: false }
<a name="line-12006"></a>  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-12007"></a>  name: "use_locking"
<a name="line-12008"></a>  type: "bool"
<a name="line-12009"></a>}
<a name="line-12010"></a>input_arg {
<a name="line-12011"></a>  description: "Should be from a Variable()."
<a name="line-12012"></a>  is_ref: true
<a name="line-12013"></a>  name: "var"
<a name="line-12014"></a>  type_attr: "T"
<a name="line-12015"></a>}
<a name="line-12016"></a>input_arg {
<a name="line-12017"></a>  description: "Should be from a Variable()."
<a name="line-12018"></a>  is_ref: true
<a name="line-12019"></a>  name: "accum"
<a name="line-12020"></a>  type_attr: "T"
<a name="line-12021"></a>}
<a name="line-12022"></a>input_arg {
<a name="line-12023"></a>  description: "Should be from a Variable()."
<a name="line-12024"></a>  is_ref: true
<a name="line-12025"></a>  name: "linear"
<a name="line-12026"></a>  type_attr: "T"
<a name="line-12027"></a>}
<a name="line-12028"></a>input_arg {
<a name="line-12029"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-12030"></a>}
<a name="line-12031"></a>input_arg {
<a name="line-12032"></a>  description: "A vector of indices into the first dimension of var and accum."
<a name="line-12033"></a>  name: "indices"
<a name="line-12034"></a>  type_attr: "Tindices"
<a name="line-12035"></a>}
<a name="line-12036"></a>input_arg {
<a name="line-12037"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-12038"></a>  name: "lr"
<a name="line-12039"></a>  type_attr: "T"
<a name="line-12040"></a>}
<a name="line-12041"></a>input_arg {
<a name="line-12042"></a>  description: "L1 regularization. Must be a scalar."
<a name="line-12043"></a>  name: "l1"
<a name="line-12044"></a>  type_attr: "T"
<a name="line-12045"></a>}
<a name="line-12046"></a>input_arg {
<a name="line-12047"></a>  description: "L2 regularization. Must be a scalar."
<a name="line-12048"></a>  name: "l2"
<a name="line-12049"></a>  type_attr: "T"
<a name="line-12050"></a>}
<a name="line-12051"></a>input_arg {
<a name="line-12052"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-12053"></a>  name: "lr_power"
<a name="line-12054"></a>  type_attr: "T"
<a name="line-12055"></a>}
<a name="line-12056"></a>output_arg {
<a name="line-12057"></a>  description: "Same as \"var\"."
<a name="line-12058"></a>  is_ref: true
<a name="line-12059"></a>  name: "out"
<a name="line-12060"></a>  type_attr: "T"
<a name="line-12061"></a>}
<a name="line-12062"></a>-}</span>
<a name="line-12063"></a>
<a name="line-12064"></a><span class='hs-comment'>-- | Update entries in '*var' and '*accum' according to the proximal adagrad scheme.</span>
<a name="line-12065"></a>
<a name="line-12066"></a><a name="sparseApplyAdagradDA"></a><span class='hs-definition'>sparseApplyAdagradDA</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12067"></a>                                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12068"></a>                                                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12069"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-12070"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12071"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-12072"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-12073"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-12074"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-12075"></a>                                                                       <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-12076"></a>                                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12077"></a>                                                               <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-12078"></a>                                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12079"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12080"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-12081"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gradient_accumulator__: Should be from a Variable().</span>
<a name="line-12082"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gradient_squared_accumulator__: Should be from a Variable().</span>
<a name="line-12083"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-12084"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A vector of indices into the first dimension of var and accum.</span>
<a name="line-12085"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Learning rate. Must be a scalar.</span>
<a name="line-12086"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l1__: L1 regularization. Must be a scalar.</span>
<a name="line-12087"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l2__: L2 regularization. Must be a scalar.</span>
<a name="line-12088"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v9</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __global_step__: Training step number. Must be a scalar.</span>
<a name="line-12089"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-12090"></a><span class='hs-definition'>sparseApplyAdagradDA</span> <span class='hs-varid'>var</span> <span class='hs-varid'>gradient_accumulator</span> <span class='hs-varid'>gradient_squared_accumulator</span> <span class='hs-varid'>grad</span>
<a name="line-12091"></a>                     <span class='hs-varid'>indices</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>global_step</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12092"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseApplyAdagradDA"</span>
<a name="line-12093"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-12094"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12095"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>gradient_accumulator</span> <span class='hs-varid'>gradient_squared_accumulator</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span>
<a name="line-12096"></a>        <span class='hs-varid'>l2</span> <span class='hs-varid'>global_step</span>
<a name="line-12097"></a><span class='hs-comment'>{-
<a name="line-12098"></a>attr {
<a name="line-12099"></a>  allowed_values {
<a name="line-12100"></a>    list {
<a name="line-12101"></a>      type: DT_FLOAT
<a name="line-12102"></a>      type: DT_DOUBLE
<a name="line-12103"></a>      type: DT_INT64
<a name="line-12104"></a>      type: DT_INT32
<a name="line-12105"></a>      type: DT_UINT8
<a name="line-12106"></a>      type: DT_UINT16
<a name="line-12107"></a>      type: DT_INT16
<a name="line-12108"></a>      type: DT_INT8
<a name="line-12109"></a>      type: DT_COMPLEX64
<a name="line-12110"></a>      type: DT_COMPLEX128
<a name="line-12111"></a>      type: DT_QINT8
<a name="line-12112"></a>      type: DT_QUINT8
<a name="line-12113"></a>      type: DT_QINT32
<a name="line-12114"></a>      type: DT_HALF
<a name="line-12115"></a>    }
<a name="line-12116"></a>  }
<a name="line-12117"></a>  name: "T"
<a name="line-12118"></a>  type: "type"
<a name="line-12119"></a>}
<a name="line-12120"></a>attr {
<a name="line-12121"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-12122"></a>  name: "Tindices"
<a name="line-12123"></a>  type: "type"
<a name="line-12124"></a>}
<a name="line-12125"></a>attr {
<a name="line-12126"></a>  default_value { b: false }
<a name="line-12127"></a>  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
<a name="line-12128"></a>  name: "use_locking"
<a name="line-12129"></a>  type: "bool"
<a name="line-12130"></a>}
<a name="line-12131"></a>input_arg {
<a name="line-12132"></a>  description: "Should be from a Variable()."
<a name="line-12133"></a>  is_ref: true
<a name="line-12134"></a>  name: "var"
<a name="line-12135"></a>  type_attr: "T"
<a name="line-12136"></a>}
<a name="line-12137"></a>input_arg {
<a name="line-12138"></a>  description: "Should be from a Variable()."
<a name="line-12139"></a>  is_ref: true
<a name="line-12140"></a>  name: "gradient_accumulator"
<a name="line-12141"></a>  type_attr: "T"
<a name="line-12142"></a>}
<a name="line-12143"></a>input_arg {
<a name="line-12144"></a>  description: "Should be from a Variable()."
<a name="line-12145"></a>  is_ref: true
<a name="line-12146"></a>  name: "gradient_squared_accumulator"
<a name="line-12147"></a>  type_attr: "T"
<a name="line-12148"></a>}
<a name="line-12149"></a>input_arg {
<a name="line-12150"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-12151"></a>}
<a name="line-12152"></a>input_arg {
<a name="line-12153"></a>  description: "A vector of indices into the first dimension of var and accum."
<a name="line-12154"></a>  name: "indices"
<a name="line-12155"></a>  type_attr: "Tindices"
<a name="line-12156"></a>}
<a name="line-12157"></a>input_arg {
<a name="line-12158"></a>  description: "Learning rate. Must be a scalar."
<a name="line-12159"></a>  name: "lr"
<a name="line-12160"></a>  type_attr: "T"
<a name="line-12161"></a>}
<a name="line-12162"></a>input_arg {
<a name="line-12163"></a>  description: "L1 regularization. Must be a scalar."
<a name="line-12164"></a>  name: "l1"
<a name="line-12165"></a>  type_attr: "T"
<a name="line-12166"></a>}
<a name="line-12167"></a>input_arg {
<a name="line-12168"></a>  description: "L2 regularization. Must be a scalar."
<a name="line-12169"></a>  name: "l2"
<a name="line-12170"></a>  type_attr: "T"
<a name="line-12171"></a>}
<a name="line-12172"></a>input_arg {
<a name="line-12173"></a>  description: "Training step number. Must be a scalar."
<a name="line-12174"></a>  name: "global_step"
<a name="line-12175"></a>  type: DT_INT64
<a name="line-12176"></a>}
<a name="line-12177"></a>output_arg {
<a name="line-12178"></a>  description: "Same as \"var\"."
<a name="line-12179"></a>  is_ref: true
<a name="line-12180"></a>  name: "out"
<a name="line-12181"></a>  type_attr: "T"
<a name="line-12182"></a>}
<a name="line-12183"></a>-}</span>
<a name="line-12184"></a>
<a name="line-12185"></a><a name="floorDiv"></a><span class='hs-comment'>-- | Returns x // y element-wise.</span>
<a name="line-12186"></a><span class='hs-comment'>--</span>
<a name="line-12187"></a><span class='hs-comment'>-- *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting</span>
<a name="line-12188"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-12189"></a><span class='hs-definition'>floorDiv</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12190"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12191"></a>                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12192"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12193"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-12194"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-12195"></a>                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-12196"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-12197"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-12198"></a><span class='hs-definition'>floorDiv</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12199"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FloorDiv"</span>
<a name="line-12200"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12201"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-12202"></a><span class='hs-comment'>{-
<a name="line-12203"></a>attr {
<a name="line-12204"></a>  allowed_values {
<a name="line-12205"></a>    list {
<a name="line-12206"></a>      type: DT_HALF
<a name="line-12207"></a>      type: DT_FLOAT
<a name="line-12208"></a>      type: DT_DOUBLE
<a name="line-12209"></a>      type: DT_UINT8
<a name="line-12210"></a>      type: DT_INT8
<a name="line-12211"></a>      type: DT_UINT16
<a name="line-12212"></a>      type: DT_INT16
<a name="line-12213"></a>      type: DT_INT32
<a name="line-12214"></a>      type: DT_INT64
<a name="line-12215"></a>      type: DT_COMPLEX64
<a name="line-12216"></a>      type: DT_COMPLEX128
<a name="line-12217"></a>    }
<a name="line-12218"></a>  }
<a name="line-12219"></a>  name: "T"
<a name="line-12220"></a>  type: "type"
<a name="line-12221"></a>}
<a name="line-12222"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-12223"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-12224"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-12225"></a>-}</span>
<a name="line-12226"></a>
<a name="line-12227"></a><span class='hs-comment'>-- | Update '*var' according to the proximal adagrad scheme.</span>
<a name="line-12228"></a>
<a name="line-12229"></a><a name="applyAdagradDA"></a><span class='hs-definition'>applyAdagradDA</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12230"></a>                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12231"></a>                                                     <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12232"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-12233"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12234"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-12235"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-12236"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-12237"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-12238"></a>                                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12239"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-12240"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gradient_accumulator__: Should be from a Variable().</span>
<a name="line-12241"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gradient_squared_accumulator__: Should be from a Variable().</span>
<a name="line-12242"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-12243"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-12244"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l1__: L1 regularization. Must be a scalar.</span>
<a name="line-12245"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l2__: L2 regularization. Must be a scalar.</span>
<a name="line-12246"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __global_step__: Training step number. Must be a scalar.</span>
<a name="line-12247"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-12248"></a><span class='hs-definition'>applyAdagradDA</span> <span class='hs-varid'>var</span> <span class='hs-varid'>gradient_accumulator</span> <span class='hs-varid'>gradient_squared_accumulator</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span>
<a name="line-12249"></a>               <span class='hs-varid'>l2</span> <span class='hs-varid'>global_step</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12250"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyAdagradDA"</span>
<a name="line-12251"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12252"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>gradient_accumulator</span> <span class='hs-varid'>gradient_squared_accumulator</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span>
<a name="line-12253"></a>        <span class='hs-varid'>global_step</span>
<a name="line-12254"></a><span class='hs-comment'>{-
<a name="line-12255"></a>attr {
<a name="line-12256"></a>  allowed_values {
<a name="line-12257"></a>    list {
<a name="line-12258"></a>      type: DT_FLOAT
<a name="line-12259"></a>      type: DT_DOUBLE
<a name="line-12260"></a>      type: DT_INT64
<a name="line-12261"></a>      type: DT_INT32
<a name="line-12262"></a>      type: DT_UINT8
<a name="line-12263"></a>      type: DT_UINT16
<a name="line-12264"></a>      type: DT_INT16
<a name="line-12265"></a>      type: DT_INT8
<a name="line-12266"></a>      type: DT_COMPLEX64
<a name="line-12267"></a>      type: DT_COMPLEX128
<a name="line-12268"></a>      type: DT_QINT8
<a name="line-12269"></a>      type: DT_QUINT8
<a name="line-12270"></a>      type: DT_QINT32
<a name="line-12271"></a>      type: DT_HALF
<a name="line-12272"></a>    }
<a name="line-12273"></a>  }
<a name="line-12274"></a>  name: "T"
<a name="line-12275"></a>  type: "type"
<a name="line-12276"></a>}
<a name="line-12277"></a>attr {
<a name="line-12278"></a>  default_value { b: false }
<a name="line-12279"></a>  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
<a name="line-12280"></a>  name: "use_locking"
<a name="line-12281"></a>  type: "bool"
<a name="line-12282"></a>}
<a name="line-12283"></a>input_arg {
<a name="line-12284"></a>  description: "Should be from a Variable()."
<a name="line-12285"></a>  is_ref: true
<a name="line-12286"></a>  name: "var"
<a name="line-12287"></a>  type_attr: "T"
<a name="line-12288"></a>}
<a name="line-12289"></a>input_arg {
<a name="line-12290"></a>  description: "Should be from a Variable()."
<a name="line-12291"></a>  is_ref: true
<a name="line-12292"></a>  name: "gradient_accumulator"
<a name="line-12293"></a>  type_attr: "T"
<a name="line-12294"></a>}
<a name="line-12295"></a>input_arg {
<a name="line-12296"></a>  description: "Should be from a Variable()."
<a name="line-12297"></a>  is_ref: true
<a name="line-12298"></a>  name: "gradient_squared_accumulator"
<a name="line-12299"></a>  type_attr: "T"
<a name="line-12300"></a>}
<a name="line-12301"></a>input_arg {
<a name="line-12302"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-12303"></a>}
<a name="line-12304"></a>input_arg {
<a name="line-12305"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-12306"></a>  name: "lr"
<a name="line-12307"></a>  type_attr: "T"
<a name="line-12308"></a>}
<a name="line-12309"></a>input_arg {
<a name="line-12310"></a>  description: "L1 regularization. Must be a scalar."
<a name="line-12311"></a>  name: "l1"
<a name="line-12312"></a>  type_attr: "T"
<a name="line-12313"></a>}
<a name="line-12314"></a>input_arg {
<a name="line-12315"></a>  description: "L2 regularization. Must be a scalar."
<a name="line-12316"></a>  name: "l2"
<a name="line-12317"></a>  type_attr: "T"
<a name="line-12318"></a>}
<a name="line-12319"></a>input_arg {
<a name="line-12320"></a>  description: "Training step number. Must be a scalar."
<a name="line-12321"></a>  name: "global_step"
<a name="line-12322"></a>  type: DT_INT64
<a name="line-12323"></a>}
<a name="line-12324"></a>output_arg {
<a name="line-12325"></a>  description: "Same as \"var\"."
<a name="line-12326"></a>  is_ref: true
<a name="line-12327"></a>  name: "out"
<a name="line-12328"></a>  type_attr: "T"
<a name="line-12329"></a>}
<a name="line-12330"></a>-}</span>
<a name="line-12331"></a>
<a name="line-12332"></a><a name="applyAdagrad"></a><span class='hs-comment'>-- | Update '*var' according to the adagrad scheme.</span>
<a name="line-12333"></a><span class='hs-comment'>--</span>
<a name="line-12334"></a><span class='hs-comment'>-- accum += grad * grad</span>
<a name="line-12335"></a><span class='hs-comment'>-- var -= lr * grad * (1 / sqrt(accum))</span>
<a name="line-12336"></a><span class='hs-definition'>applyAdagrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12337"></a>                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12338"></a>                                          <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12339"></a>                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12340"></a>                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-12341"></a>                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-12342"></a>                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12343"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-12344"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum__: Should be from a Variable().</span>
<a name="line-12345"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-12346"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-12347"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-12348"></a><span class='hs-definition'>applyAdagrad</span> <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12349"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyAdagrad"</span>
<a name="line-12350"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12351"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>grad</span>
<a name="line-12352"></a><span class='hs-comment'>{-
<a name="line-12353"></a>attr {
<a name="line-12354"></a>  allowed_values {
<a name="line-12355"></a>    list {
<a name="line-12356"></a>      type: DT_FLOAT
<a name="line-12357"></a>      type: DT_DOUBLE
<a name="line-12358"></a>      type: DT_INT64
<a name="line-12359"></a>      type: DT_INT32
<a name="line-12360"></a>      type: DT_UINT8
<a name="line-12361"></a>      type: DT_UINT16
<a name="line-12362"></a>      type: DT_INT16
<a name="line-12363"></a>      type: DT_INT8
<a name="line-12364"></a>      type: DT_COMPLEX64
<a name="line-12365"></a>      type: DT_COMPLEX128
<a name="line-12366"></a>      type: DT_QINT8
<a name="line-12367"></a>      type: DT_QUINT8
<a name="line-12368"></a>      type: DT_QINT32
<a name="line-12369"></a>      type: DT_HALF
<a name="line-12370"></a>    }
<a name="line-12371"></a>  }
<a name="line-12372"></a>  name: "T"
<a name="line-12373"></a>  type: "type"
<a name="line-12374"></a>}
<a name="line-12375"></a>attr {
<a name="line-12376"></a>  default_value { b: false }
<a name="line-12377"></a>  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-12378"></a>  name: "use_locking"
<a name="line-12379"></a>  type: "bool"
<a name="line-12380"></a>}
<a name="line-12381"></a>input_arg {
<a name="line-12382"></a>  description: "Should be from a Variable()."
<a name="line-12383"></a>  is_ref: true
<a name="line-12384"></a>  name: "var"
<a name="line-12385"></a>  type_attr: "T"
<a name="line-12386"></a>}
<a name="line-12387"></a>input_arg {
<a name="line-12388"></a>  description: "Should be from a Variable()."
<a name="line-12389"></a>  is_ref: true
<a name="line-12390"></a>  name: "accum"
<a name="line-12391"></a>  type_attr: "T"
<a name="line-12392"></a>}
<a name="line-12393"></a>input_arg {
<a name="line-12394"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-12395"></a>  name: "lr"
<a name="line-12396"></a>  type_attr: "T"
<a name="line-12397"></a>}
<a name="line-12398"></a>input_arg {
<a name="line-12399"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-12400"></a>}
<a name="line-12401"></a>output_arg {
<a name="line-12402"></a>  description: "Same as \"var\"."
<a name="line-12403"></a>  is_ref: true
<a name="line-12404"></a>  name: "out"
<a name="line-12405"></a>  type_attr: "T"
<a name="line-12406"></a>}
<a name="line-12407"></a>-}</span>
<a name="line-12408"></a>
<a name="line-12409"></a><a name="sigmoidGrad"></a><span class='hs-comment'>-- | Computes the gradient of the sigmoid of `x` wrt its input.</span>
<a name="line-12410"></a><span class='hs-comment'>--</span>
<a name="line-12411"></a><span class='hs-comment'>-- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and</span>
<a name="line-12412"></a><span class='hs-comment'>-- `dy` is the corresponding input gradient.</span>
<a name="line-12413"></a><span class='hs-definition'>sigmoidGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12414"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12415"></a>                                         <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12416"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12417"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-12418"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-12419"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-12420"></a><span class='hs-definition'>sigmoidGrad</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12421"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SigmoidGrad"</span>
<a name="line-12422"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12423"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-12424"></a><span class='hs-comment'>{-
<a name="line-12425"></a>attr {
<a name="line-12426"></a>  allowed_values {
<a name="line-12427"></a>    list {
<a name="line-12428"></a>      type: DT_HALF
<a name="line-12429"></a>      type: DT_FLOAT
<a name="line-12430"></a>      type: DT_DOUBLE
<a name="line-12431"></a>      type: DT_COMPLEX64
<a name="line-12432"></a>      type: DT_COMPLEX128
<a name="line-12433"></a>    }
<a name="line-12434"></a>  }
<a name="line-12435"></a>  name: "T"
<a name="line-12436"></a>  type: "type"
<a name="line-12437"></a>}
<a name="line-12438"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-12439"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-12440"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-12441"></a>-}</span>
<a name="line-12442"></a>
<a name="line-12443"></a><a name="applyAdadelta"></a><span class='hs-comment'>-- | Update '*var' according to the adadelta scheme.</span>
<a name="line-12444"></a><span class='hs-comment'>--</span>
<a name="line-12445"></a><span class='hs-comment'>-- accum = rho() * accum + (1 - rho()) * grad.square();</span>
<a name="line-12446"></a><span class='hs-comment'>-- update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;</span>
<a name="line-12447"></a><span class='hs-comment'>-- update_accum = rho() * update_accum + (1 - rho()) * update.square();</span>
<a name="line-12448"></a><span class='hs-comment'>-- var -= update;</span>
<a name="line-12449"></a><span class='hs-definition'>applyAdadelta</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12450"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12451"></a>                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12452"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12453"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-12454"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-12455"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-12456"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12457"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-12458"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum__: Should be from a Variable().</span>
<a name="line-12459"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum_update__: Should be from a Variable().</span>
<a name="line-12460"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-12461"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rho__: Decay factor. Must be a scalar.</span>
<a name="line-12462"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __epsilon__: Constant factor. Must be a scalar.</span>
<a name="line-12463"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-12464"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-12465"></a><span class='hs-definition'>applyAdadelta</span> <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>accum_update</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12466"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyAdadelta"</span>
<a name="line-12467"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12468"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>accum_update</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span>
<a name="line-12469"></a><span class='hs-comment'>{-
<a name="line-12470"></a>attr {
<a name="line-12471"></a>  allowed_values {
<a name="line-12472"></a>    list {
<a name="line-12473"></a>      type: DT_FLOAT
<a name="line-12474"></a>      type: DT_DOUBLE
<a name="line-12475"></a>      type: DT_INT64
<a name="line-12476"></a>      type: DT_INT32
<a name="line-12477"></a>      type: DT_UINT8
<a name="line-12478"></a>      type: DT_UINT16
<a name="line-12479"></a>      type: DT_INT16
<a name="line-12480"></a>      type: DT_INT8
<a name="line-12481"></a>      type: DT_COMPLEX64
<a name="line-12482"></a>      type: DT_COMPLEX128
<a name="line-12483"></a>      type: DT_QINT8
<a name="line-12484"></a>      type: DT_QUINT8
<a name="line-12485"></a>      type: DT_QINT32
<a name="line-12486"></a>      type: DT_HALF
<a name="line-12487"></a>    }
<a name="line-12488"></a>  }
<a name="line-12489"></a>  name: "T"
<a name="line-12490"></a>  type: "type"
<a name="line-12491"></a>}
<a name="line-12492"></a>attr {
<a name="line-12493"></a>  default_value { b: false }
<a name="line-12494"></a>  description: "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
<a name="line-12495"></a>  name: "use_locking"
<a name="line-12496"></a>  type: "bool"
<a name="line-12497"></a>}
<a name="line-12498"></a>input_arg {
<a name="line-12499"></a>  description: "Should be from a Variable()."
<a name="line-12500"></a>  is_ref: true
<a name="line-12501"></a>  name: "var"
<a name="line-12502"></a>  type_attr: "T"
<a name="line-12503"></a>}
<a name="line-12504"></a>input_arg {
<a name="line-12505"></a>  description: "Should be from a Variable()."
<a name="line-12506"></a>  is_ref: true
<a name="line-12507"></a>  name: "accum"
<a name="line-12508"></a>  type_attr: "T"
<a name="line-12509"></a>}
<a name="line-12510"></a>input_arg {
<a name="line-12511"></a>  description: "Should be from a Variable()."
<a name="line-12512"></a>  is_ref: true
<a name="line-12513"></a>  name: "accum_update"
<a name="line-12514"></a>  type_attr: "T"
<a name="line-12515"></a>}
<a name="line-12516"></a>input_arg {
<a name="line-12517"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-12518"></a>  name: "lr"
<a name="line-12519"></a>  type_attr: "T"
<a name="line-12520"></a>}
<a name="line-12521"></a>input_arg {
<a name="line-12522"></a>  description: "Decay factor. Must be a scalar."
<a name="line-12523"></a>  name: "rho"
<a name="line-12524"></a>  type_attr: "T"
<a name="line-12525"></a>}
<a name="line-12526"></a>input_arg {
<a name="line-12527"></a>  description: "Constant factor. Must be a scalar."
<a name="line-12528"></a>  name: "epsilon"
<a name="line-12529"></a>  type_attr: "T"
<a name="line-12530"></a>}
<a name="line-12531"></a>input_arg {
<a name="line-12532"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-12533"></a>}
<a name="line-12534"></a>output_arg {
<a name="line-12535"></a>  description: "Same as \"var\"."
<a name="line-12536"></a>  is_ref: true
<a name="line-12537"></a>  name: "out"
<a name="line-12538"></a>  type_attr: "T"
<a name="line-12539"></a>}
<a name="line-12540"></a>-}</span>
<a name="line-12541"></a>
<a name="line-12542"></a><a name="sparseApplyProximalGradientDescent"></a><span class='hs-comment'>-- | Sparse update '*var' as FOBOS algorithm with fixed learning rate.</span>
<a name="line-12543"></a><span class='hs-comment'>--</span>
<a name="line-12544"></a><span class='hs-comment'>-- That is for rows we have grad for, we update var as follows:</span>
<a name="line-12545"></a><span class='hs-comment'>-- prox_v = var - alpha * grad</span>
<a name="line-12546"></a><span class='hs-comment'>-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}</span>
<a name="line-12547"></a><span class='hs-definition'>sparseApplyProximalGradientDescent</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span>
<a name="line-12548"></a>                                      <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12549"></a>                                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12550"></a>                                                          <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12551"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-12552"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12553"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-12554"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-12555"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-12556"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-12557"></a>                                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12558"></a>                                                  <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-12559"></a>                                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12560"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12561"></a>                                      <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-12562"></a>                                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __alpha__: Scaling factor. Must be a scalar.</span>
<a name="line-12563"></a>                                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l1__: L1 regularization. Must be a scalar.</span>
<a name="line-12564"></a>                                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l2__: L2 regularization. Must be a scalar.</span>
<a name="line-12565"></a>                                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-12566"></a>                                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A vector of indices into the first dimension of var and accum.</span>
<a name="line-12567"></a>                                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-12568"></a><span class='hs-definition'>sparseApplyProximalGradientDescent</span> <span class='hs-varid'>var</span> <span class='hs-varid'>alpha</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>grad</span>
<a name="line-12569"></a>                                   <span class='hs-varid'>indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12570"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseApplyProximalGradientDescent"</span>
<a name="line-12571"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-12572"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12573"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>alpha</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span>
<a name="line-12574"></a><span class='hs-comment'>{-
<a name="line-12575"></a>attr {
<a name="line-12576"></a>  allowed_values {
<a name="line-12577"></a>    list {
<a name="line-12578"></a>      type: DT_FLOAT
<a name="line-12579"></a>      type: DT_DOUBLE
<a name="line-12580"></a>      type: DT_INT64
<a name="line-12581"></a>      type: DT_INT32
<a name="line-12582"></a>      type: DT_UINT8
<a name="line-12583"></a>      type: DT_UINT16
<a name="line-12584"></a>      type: DT_INT16
<a name="line-12585"></a>      type: DT_INT8
<a name="line-12586"></a>      type: DT_COMPLEX64
<a name="line-12587"></a>      type: DT_COMPLEX128
<a name="line-12588"></a>      type: DT_QINT8
<a name="line-12589"></a>      type: DT_QUINT8
<a name="line-12590"></a>      type: DT_QINT32
<a name="line-12591"></a>      type: DT_HALF
<a name="line-12592"></a>    }
<a name="line-12593"></a>  }
<a name="line-12594"></a>  name: "T"
<a name="line-12595"></a>  type: "type"
<a name="line-12596"></a>}
<a name="line-12597"></a>attr {
<a name="line-12598"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-12599"></a>  name: "Tindices"
<a name="line-12600"></a>  type: "type"
<a name="line-12601"></a>}
<a name="line-12602"></a>attr {
<a name="line-12603"></a>  default_value { b: false }
<a name="line-12604"></a>  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-12605"></a>  name: "use_locking"
<a name="line-12606"></a>  type: "bool"
<a name="line-12607"></a>}
<a name="line-12608"></a>input_arg {
<a name="line-12609"></a>  description: "Should be from a Variable()."
<a name="line-12610"></a>  is_ref: true
<a name="line-12611"></a>  name: "var"
<a name="line-12612"></a>  type_attr: "T"
<a name="line-12613"></a>}
<a name="line-12614"></a>input_arg {
<a name="line-12615"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-12616"></a>  name: "alpha"
<a name="line-12617"></a>  type_attr: "T"
<a name="line-12618"></a>}
<a name="line-12619"></a>input_arg {
<a name="line-12620"></a>  description: "L1 regularization. Must be a scalar."
<a name="line-12621"></a>  name: "l1"
<a name="line-12622"></a>  type_attr: "T"
<a name="line-12623"></a>}
<a name="line-12624"></a>input_arg {
<a name="line-12625"></a>  description: "L2 regularization. Must be a scalar."
<a name="line-12626"></a>  name: "l2"
<a name="line-12627"></a>  type_attr: "T"
<a name="line-12628"></a>}
<a name="line-12629"></a>input_arg {
<a name="line-12630"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-12631"></a>}
<a name="line-12632"></a>input_arg {
<a name="line-12633"></a>  description: "A vector of indices into the first dimension of var and accum."
<a name="line-12634"></a>  name: "indices"
<a name="line-12635"></a>  type_attr: "Tindices"
<a name="line-12636"></a>}
<a name="line-12637"></a>output_arg {
<a name="line-12638"></a>  description: "Same as \"var\"."
<a name="line-12639"></a>  is_ref: true
<a name="line-12640"></a>  name: "out"
<a name="line-12641"></a>  type_attr: "T"
<a name="line-12642"></a>}
<a name="line-12643"></a>-}</span>
<a name="line-12644"></a>
<a name="line-12645"></a><a name="applyProximalGradientDescent"></a><span class='hs-comment'>-- | Update '*var' as FOBOS algorithm with fixed learning rate.</span>
<a name="line-12646"></a><span class='hs-comment'>--</span>
<a name="line-12647"></a><span class='hs-comment'>-- prox_v = var - alpha * delta</span>
<a name="line-12648"></a><span class='hs-comment'>-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}</span>
<a name="line-12649"></a><span class='hs-definition'>applyProximalGradientDescent</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12650"></a>                                                        <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12651"></a>                                                                <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12652"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-12653"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12654"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-12655"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-12656"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-12657"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-12658"></a>                                                                <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-12659"></a>                                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12660"></a>                                <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-12661"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __alpha__: Scaling factor. Must be a scalar.</span>
<a name="line-12662"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l1__: L1 regularization. Must be a scalar.</span>
<a name="line-12663"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l2__: L2 regularization. Must be a scalar.</span>
<a name="line-12664"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __delta__: The change.</span>
<a name="line-12665"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-12666"></a><span class='hs-definition'>applyProximalGradientDescent</span> <span class='hs-varid'>var</span> <span class='hs-varid'>alpha</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>delta</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12667"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyProximalGradientDescent"</span>
<a name="line-12668"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12669"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>alpha</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>delta</span>
<a name="line-12670"></a><span class='hs-comment'>{-
<a name="line-12671"></a>attr {
<a name="line-12672"></a>  allowed_values {
<a name="line-12673"></a>    list {
<a name="line-12674"></a>      type: DT_FLOAT
<a name="line-12675"></a>      type: DT_DOUBLE
<a name="line-12676"></a>      type: DT_INT64
<a name="line-12677"></a>      type: DT_INT32
<a name="line-12678"></a>      type: DT_UINT8
<a name="line-12679"></a>      type: DT_UINT16
<a name="line-12680"></a>      type: DT_INT16
<a name="line-12681"></a>      type: DT_INT8
<a name="line-12682"></a>      type: DT_COMPLEX64
<a name="line-12683"></a>      type: DT_COMPLEX128
<a name="line-12684"></a>      type: DT_QINT8
<a name="line-12685"></a>      type: DT_QUINT8
<a name="line-12686"></a>      type: DT_QINT32
<a name="line-12687"></a>      type: DT_HALF
<a name="line-12688"></a>    }
<a name="line-12689"></a>  }
<a name="line-12690"></a>  name: "T"
<a name="line-12691"></a>  type: "type"
<a name="line-12692"></a>}
<a name="line-12693"></a>attr {
<a name="line-12694"></a>  default_value { b: false }
<a name="line-12695"></a>  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-12696"></a>  name: "use_locking"
<a name="line-12697"></a>  type: "bool"
<a name="line-12698"></a>}
<a name="line-12699"></a>input_arg {
<a name="line-12700"></a>  description: "Should be from a Variable()."
<a name="line-12701"></a>  is_ref: true
<a name="line-12702"></a>  name: "var"
<a name="line-12703"></a>  type_attr: "T"
<a name="line-12704"></a>}
<a name="line-12705"></a>input_arg {
<a name="line-12706"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-12707"></a>  name: "alpha"
<a name="line-12708"></a>  type_attr: "T"
<a name="line-12709"></a>}
<a name="line-12710"></a>input_arg {
<a name="line-12711"></a>  description: "L1 regularization. Must be a scalar."
<a name="line-12712"></a>  name: "l1"
<a name="line-12713"></a>  type_attr: "T"
<a name="line-12714"></a>}
<a name="line-12715"></a>input_arg {
<a name="line-12716"></a>  description: "L2 regularization. Must be a scalar."
<a name="line-12717"></a>  name: "l2"
<a name="line-12718"></a>  type_attr: "T"
<a name="line-12719"></a>}
<a name="line-12720"></a>input_arg {
<a name="line-12721"></a>  description: "The change." name: "delta" type_attr: "T"
<a name="line-12722"></a>}
<a name="line-12723"></a>output_arg {
<a name="line-12724"></a>  description: "Same as \"var\"."
<a name="line-12725"></a>  is_ref: true
<a name="line-12726"></a>  name: "out"
<a name="line-12727"></a>  type_attr: "T"
<a name="line-12728"></a>}
<a name="line-12729"></a>-}</span>
<a name="line-12730"></a>
<a name="line-12731"></a><a name="matrixSolve"></a><span class='hs-comment'>-- | Solves systems of linear equations.</span>
<a name="line-12732"></a><span class='hs-comment'>--</span>
<a name="line-12733"></a><span class='hs-comment'>-- `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions</span>
<a name="line-12734"></a><span class='hs-comment'>-- form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is</span>
<a name="line-12735"></a><span class='hs-comment'>-- a tensor shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix</span>
<a name="line-12736"></a><span class='hs-comment'>-- satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.</span>
<a name="line-12737"></a><span class='hs-comment'>-- If `adjoint` is `True` then each output matrix satisfies</span>
<a name="line-12738"></a><span class='hs-comment'>-- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.</span>
<a name="line-12739"></a><span class='hs-definition'>matrixSolve</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12740"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12741"></a>                                         <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-12742"></a>                                         <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12743"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __matrix__: Shape is `[..., M, M]`.</span>
<a name="line-12744"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rhs__: Shape is `[..., M, K]`.</span>
<a name="line-12745"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Shape is `[..., M, K]`.</span>
<a name="line-12746"></a><span class='hs-definition'>matrixSolve</span> <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12747"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatrixSolve"</span>
<a name="line-12748"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12749"></a>        <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span>
<a name="line-12750"></a><span class='hs-comment'>{-
<a name="line-12751"></a>attr {
<a name="line-12752"></a>  default_value { b: false }
<a name="line-12753"></a>  description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\nadjoint."
<a name="line-12754"></a>  name: "adjoint"
<a name="line-12755"></a>  type: "bool"
<a name="line-12756"></a>}
<a name="line-12757"></a>attr {
<a name="line-12758"></a>  allowed_values {
<a name="line-12759"></a>    list {
<a name="line-12760"></a>      type: DT_DOUBLE
<a name="line-12761"></a>      type: DT_FLOAT
<a name="line-12762"></a>      type: DT_COMPLEX64
<a name="line-12763"></a>      type: DT_COMPLEX128
<a name="line-12764"></a>    }
<a name="line-12765"></a>  }
<a name="line-12766"></a>  name: "T"
<a name="line-12767"></a>  type: "type"
<a name="line-12768"></a>}
<a name="line-12769"></a>input_arg {
<a name="line-12770"></a>  description: "Shape is `[..., M, M]`."
<a name="line-12771"></a>  name: "matrix"
<a name="line-12772"></a>  type_attr: "T"
<a name="line-12773"></a>}
<a name="line-12774"></a>input_arg {
<a name="line-12775"></a>  description: "Shape is `[..., M, K]`." name: "rhs" type_attr: "T"
<a name="line-12776"></a>}
<a name="line-12777"></a>output_arg {
<a name="line-12778"></a>  description: "Shape is `[..., M, K]`."
<a name="line-12779"></a>  name: "output"
<a name="line-12780"></a>  type_attr: "T"
<a name="line-12781"></a>}
<a name="line-12782"></a>-}</span>
<a name="line-12783"></a>
<a name="line-12784"></a><a name="sparseApplyProximalAdagrad"></a><span class='hs-comment'>-- | Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.</span>
<a name="line-12785"></a><span class='hs-comment'>--</span>
<a name="line-12786"></a><span class='hs-comment'>-- That is for rows we have grad for, we update var and accum as follows:</span>
<a name="line-12787"></a><span class='hs-comment'>-- accum += grad * grad</span>
<a name="line-12788"></a><span class='hs-comment'>-- prox_v = var</span>
<a name="line-12789"></a><span class='hs-comment'>-- prox_v -= lr * grad * (1 / sqrt(accum))</span>
<a name="line-12790"></a><span class='hs-comment'>-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}</span>
<a name="line-12791"></a><span class='hs-definition'>sparseApplyProximalAdagrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12792"></a>                                                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12793"></a>                                                                          <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12794"></a>                                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-12795"></a>                                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12796"></a>                                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-12797"></a>                                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-12798"></a>                                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-12799"></a>                                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-12800"></a>                                                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-12801"></a>                                                                          <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12802"></a>                                                                  <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-12803"></a>                                                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12804"></a>                                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12805"></a>                              <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-12806"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum__: Should be from a Variable().</span>
<a name="line-12807"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Learning rate. Must be a scalar.</span>
<a name="line-12808"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l1__: L1 regularization. Must be a scalar.</span>
<a name="line-12809"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l2__: L2 regularization. Must be a scalar.</span>
<a name="line-12810"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-12811"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A vector of indices into the first dimension of var and accum.</span>
<a name="line-12812"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-12813"></a><span class='hs-definition'>sparseApplyProximalAdagrad</span> <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12814"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseApplyProximalAdagrad"</span>
<a name="line-12815"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-12816"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12817"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span>
<a name="line-12818"></a><span class='hs-comment'>{-
<a name="line-12819"></a>attr {
<a name="line-12820"></a>  allowed_values {
<a name="line-12821"></a>    list {
<a name="line-12822"></a>      type: DT_FLOAT
<a name="line-12823"></a>      type: DT_DOUBLE
<a name="line-12824"></a>      type: DT_INT64
<a name="line-12825"></a>      type: DT_INT32
<a name="line-12826"></a>      type: DT_UINT8
<a name="line-12827"></a>      type: DT_UINT16
<a name="line-12828"></a>      type: DT_INT16
<a name="line-12829"></a>      type: DT_INT8
<a name="line-12830"></a>      type: DT_COMPLEX64
<a name="line-12831"></a>      type: DT_COMPLEX128
<a name="line-12832"></a>      type: DT_QINT8
<a name="line-12833"></a>      type: DT_QUINT8
<a name="line-12834"></a>      type: DT_QINT32
<a name="line-12835"></a>      type: DT_HALF
<a name="line-12836"></a>    }
<a name="line-12837"></a>  }
<a name="line-12838"></a>  name: "T"
<a name="line-12839"></a>  type: "type"
<a name="line-12840"></a>}
<a name="line-12841"></a>attr {
<a name="line-12842"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-12843"></a>  name: "Tindices"
<a name="line-12844"></a>  type: "type"
<a name="line-12845"></a>}
<a name="line-12846"></a>attr {
<a name="line-12847"></a>  default_value { b: false }
<a name="line-12848"></a>  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
<a name="line-12849"></a>  name: "use_locking"
<a name="line-12850"></a>  type: "bool"
<a name="line-12851"></a>}
<a name="line-12852"></a>input_arg {
<a name="line-12853"></a>  description: "Should be from a Variable()."
<a name="line-12854"></a>  is_ref: true
<a name="line-12855"></a>  name: "var"
<a name="line-12856"></a>  type_attr: "T"
<a name="line-12857"></a>}
<a name="line-12858"></a>input_arg {
<a name="line-12859"></a>  description: "Should be from a Variable()."
<a name="line-12860"></a>  is_ref: true
<a name="line-12861"></a>  name: "accum"
<a name="line-12862"></a>  type_attr: "T"
<a name="line-12863"></a>}
<a name="line-12864"></a>input_arg {
<a name="line-12865"></a>  description: "Learning rate. Must be a scalar."
<a name="line-12866"></a>  name: "lr"
<a name="line-12867"></a>  type_attr: "T"
<a name="line-12868"></a>}
<a name="line-12869"></a>input_arg {
<a name="line-12870"></a>  description: "L1 regularization. Must be a scalar."
<a name="line-12871"></a>  name: "l1"
<a name="line-12872"></a>  type_attr: "T"
<a name="line-12873"></a>}
<a name="line-12874"></a>input_arg {
<a name="line-12875"></a>  description: "L2 regularization. Must be a scalar."
<a name="line-12876"></a>  name: "l2"
<a name="line-12877"></a>  type_attr: "T"
<a name="line-12878"></a>}
<a name="line-12879"></a>input_arg {
<a name="line-12880"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-12881"></a>}
<a name="line-12882"></a>input_arg {
<a name="line-12883"></a>  description: "A vector of indices into the first dimension of var and accum."
<a name="line-12884"></a>  name: "indices"
<a name="line-12885"></a>  type_attr: "Tindices"
<a name="line-12886"></a>}
<a name="line-12887"></a>output_arg {
<a name="line-12888"></a>  description: "Same as \"var\"."
<a name="line-12889"></a>  is_ref: true
<a name="line-12890"></a>  name: "out"
<a name="line-12891"></a>  type_attr: "T"
<a name="line-12892"></a>}
<a name="line-12893"></a>-}</span>
<a name="line-12894"></a>
<a name="line-12895"></a><span class='hs-comment'>-- | Update '*var' by subtracting 'alpha' * 'delta' from it.</span>
<a name="line-12896"></a>
<a name="line-12897"></a><a name="applyGradientDescent"></a><span class='hs-definition'>applyGradientDescent</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12898"></a>                                          <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12899"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12900"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-12901"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12902"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-12903"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-12904"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-12905"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12906"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-12907"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __alpha__: Scaling factor. Must be a scalar.</span>
<a name="line-12908"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __delta__: The change.</span>
<a name="line-12909"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-12910"></a><span class='hs-definition'>applyGradientDescent</span> <span class='hs-varid'>var</span> <span class='hs-varid'>alpha</span> <span class='hs-varid'>delta</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12911"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyGradientDescent"</span>
<a name="line-12912"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-12913"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>alpha</span> <span class='hs-varid'>delta</span>
<a name="line-12914"></a><span class='hs-comment'>{-
<a name="line-12915"></a>attr {
<a name="line-12916"></a>  allowed_values {
<a name="line-12917"></a>    list {
<a name="line-12918"></a>      type: DT_FLOAT
<a name="line-12919"></a>      type: DT_DOUBLE
<a name="line-12920"></a>      type: DT_INT64
<a name="line-12921"></a>      type: DT_INT32
<a name="line-12922"></a>      type: DT_UINT8
<a name="line-12923"></a>      type: DT_UINT16
<a name="line-12924"></a>      type: DT_INT16
<a name="line-12925"></a>      type: DT_INT8
<a name="line-12926"></a>      type: DT_COMPLEX64
<a name="line-12927"></a>      type: DT_COMPLEX128
<a name="line-12928"></a>      type: DT_QINT8
<a name="line-12929"></a>      type: DT_QUINT8
<a name="line-12930"></a>      type: DT_QINT32
<a name="line-12931"></a>      type: DT_HALF
<a name="line-12932"></a>    }
<a name="line-12933"></a>  }
<a name="line-12934"></a>  name: "T"
<a name="line-12935"></a>  type: "type"
<a name="line-12936"></a>}
<a name="line-12937"></a>attr {
<a name="line-12938"></a>  default_value { b: false }
<a name="line-12939"></a>  description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-12940"></a>  name: "use_locking"
<a name="line-12941"></a>  type: "bool"
<a name="line-12942"></a>}
<a name="line-12943"></a>input_arg {
<a name="line-12944"></a>  description: "Should be from a Variable()."
<a name="line-12945"></a>  is_ref: true
<a name="line-12946"></a>  name: "var"
<a name="line-12947"></a>  type_attr: "T"
<a name="line-12948"></a>}
<a name="line-12949"></a>input_arg {
<a name="line-12950"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-12951"></a>  name: "alpha"
<a name="line-12952"></a>  type_attr: "T"
<a name="line-12953"></a>}
<a name="line-12954"></a>input_arg {
<a name="line-12955"></a>  description: "The change." name: "delta" type_attr: "T"
<a name="line-12956"></a>}
<a name="line-12957"></a>output_arg {
<a name="line-12958"></a>  description: "Same as \"var\"."
<a name="line-12959"></a>  is_ref: true
<a name="line-12960"></a>  name: "out"
<a name="line-12961"></a>  type_attr: "T"
<a name="line-12962"></a>}
<a name="line-12963"></a>-}</span>
<a name="line-12964"></a>
<a name="line-12965"></a><a name="batchNormWithGlobalNormalization"></a><span class='hs-comment'>-- | Batch normalization.</span>
<a name="line-12966"></a><span class='hs-comment'>--</span>
<a name="line-12967"></a><span class='hs-comment'>-- This op is deprecated. Prefer `tf.nn.batch_normalization`.</span>
<a name="line-12968"></a><span class='hs-definition'>batchNormWithGlobalNormalization</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-12969"></a>                                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12970"></a>                                                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-12971"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-12972"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-12973"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-12974"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-12975"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-12976"></a>                                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-12977"></a>                                                                       <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-12978"></a>                                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-12979"></a>                                    <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor</span>
<a name="line-12980"></a>                                         <span class='hs-comment'>-- needs to be multiplied with gamma.</span>
<a name="line-12981"></a>                                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __variance_epsilon__: A small float number to avoid dividing by 0.</span>
<a name="line-12982"></a>                                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __t__: A 4D input Tensor.</span>
<a name="line-12983"></a>                                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.</span>
<a name="line-12984"></a>                                                   <span class='hs-comment'>-- This is the first output from tf.nn.moments,</span>
<a name="line-12985"></a>                                                   <span class='hs-comment'>-- or a saved moving average thereof.</span>
<a name="line-12986"></a>                                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.</span>
<a name="line-12987"></a>                                                   <span class='hs-comment'>-- This is the second output from tf.nn.moments,</span>
<a name="line-12988"></a>                                                   <span class='hs-comment'>-- or a saved moving average thereof.</span>
<a name="line-12989"></a>                                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t.</span>
<a name="line-12990"></a>                                                   <span class='hs-comment'>-- An offset to be added to the normalized tensor.</span>
<a name="line-12991"></a>                                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.</span>
<a name="line-12992"></a>                                                   <span class='hs-comment'>-- If "scale_after_normalization" is true, this tensor will be multiplied</span>
<a name="line-12993"></a>                                                   <span class='hs-comment'>-- with the normalized tensor.</span>
<a name="line-12994"></a>                                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __result__</span>
<a name="line-12995"></a><span class='hs-definition'>batchNormWithGlobalNormalization</span> <span class='hs-varid'>scale_after_normalization</span> <span class='hs-varid'>variance_epsilon</span> <span class='hs-varid'>t</span> <span class='hs-varid'>m</span>
<a name="line-12996"></a>                                 <span class='hs-varid'>v</span> <span class='hs-varid'>beta</span> <span class='hs-varid'>gamma</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-12997"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchNormWithGlobalNormalization"</span>
<a name="line-12998"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-12999"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"scale_after_normalization"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>scale_after_normalization</span>
<a name="line-13000"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"variance_epsilon"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>variance_epsilon</span><span class='hs-layout'>)</span>
<a name="line-13001"></a>        <span class='hs-varid'>t</span> <span class='hs-varid'>m</span> <span class='hs-varid'>v</span> <span class='hs-varid'>beta</span> <span class='hs-varid'>gamma</span>
<a name="line-13002"></a><span class='hs-comment'>{-
<a name="line-13003"></a>attr {
<a name="line-13004"></a>  allowed_values {
<a name="line-13005"></a>    list {
<a name="line-13006"></a>      type: DT_FLOAT
<a name="line-13007"></a>      type: DT_DOUBLE
<a name="line-13008"></a>      type: DT_INT64
<a name="line-13009"></a>      type: DT_INT32
<a name="line-13010"></a>      type: DT_UINT8
<a name="line-13011"></a>      type: DT_UINT16
<a name="line-13012"></a>      type: DT_INT16
<a name="line-13013"></a>      type: DT_INT8
<a name="line-13014"></a>      type: DT_COMPLEX64
<a name="line-13015"></a>      type: DT_COMPLEX128
<a name="line-13016"></a>      type: DT_QINT8
<a name="line-13017"></a>      type: DT_QUINT8
<a name="line-13018"></a>      type: DT_QINT32
<a name="line-13019"></a>      type: DT_HALF
<a name="line-13020"></a>    }
<a name="line-13021"></a>  }
<a name="line-13022"></a>  name: "T"
<a name="line-13023"></a>  type: "type"
<a name="line-13024"></a>}
<a name="line-13025"></a>attr {
<a name="line-13026"></a>  description: "A small float number to avoid dividing by 0."
<a name="line-13027"></a>  name: "variance_epsilon"
<a name="line-13028"></a>  type: "float"
<a name="line-13029"></a>}
<a name="line-13030"></a>attr {
<a name="line-13031"></a>  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
<a name="line-13032"></a>  name: "scale_after_normalization"
<a name="line-13033"></a>  type: "bool"
<a name="line-13034"></a>}
<a name="line-13035"></a>input_arg {
<a name="line-13036"></a>  description: "A 4D input Tensor." name: "t" type_attr: "T"
<a name="line-13037"></a>}
<a name="line-13038"></a>input_arg {
<a name="line-13039"></a>  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
<a name="line-13040"></a>  name: "m"
<a name="line-13041"></a>  type_attr: "T"
<a name="line-13042"></a>}
<a name="line-13043"></a>input_arg {
<a name="line-13044"></a>  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
<a name="line-13045"></a>  name: "v"
<a name="line-13046"></a>  type_attr: "T"
<a name="line-13047"></a>}
<a name="line-13048"></a>input_arg {
<a name="line-13049"></a>  description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor."
<a name="line-13050"></a>  name: "beta"
<a name="line-13051"></a>  type_attr: "T"
<a name="line-13052"></a>}
<a name="line-13053"></a>input_arg {
<a name="line-13054"></a>  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor."
<a name="line-13055"></a>  name: "gamma"
<a name="line-13056"></a>  type_attr: "T"
<a name="line-13057"></a>}
<a name="line-13058"></a>output_arg { name: "result" type_attr: "T" }
<a name="line-13059"></a>-}</span>
<a name="line-13060"></a>
<a name="line-13061"></a><a name="encodeBase64"></a><span class='hs-comment'>-- | Encode strings into web-safe base64 format.</span>
<a name="line-13062"></a><span class='hs-comment'>--</span>
<a name="line-13063"></a><span class='hs-comment'>-- Refer to the following article for more information on base64 format:</span>
<a name="line-13064"></a><span class='hs-comment'>-- en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the</span>
<a name="line-13065"></a><span class='hs-comment'>-- end so that the encoded has length multiple of 4. See Padding section of the</span>
<a name="line-13066"></a><span class='hs-comment'>-- link above.</span>
<a name="line-13067"></a><span class='hs-comment'>-- </span>
<a name="line-13068"></a><span class='hs-comment'>-- Web-safe means that the encoder uses - and _ instead of + and /.</span>
<a name="line-13069"></a><span class='hs-definition'>encodeBase64</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __input__: Strings to be encoded.</span>
<a name="line-13070"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __output__: Input strings encoded in base64.</span>
<a name="line-13071"></a><span class='hs-definition'>encodeBase64</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13072"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"EncodeBase64"</span><span class='hs-layout'>)</span>
<a name="line-13073"></a>        <span class='hs-varid'>input</span>
<a name="line-13074"></a><span class='hs-comment'>{-
<a name="line-13075"></a>attr {
<a name="line-13076"></a>  default_value { b: false }
<a name="line-13077"></a>  description: "Bool whether padding is applied at the ends."
<a name="line-13078"></a>  name: "pad"
<a name="line-13079"></a>  type: "bool"
<a name="line-13080"></a>}
<a name="line-13081"></a>input_arg {
<a name="line-13082"></a>  description: "Strings to be encoded." name: "input" type: DT_STRING
<a name="line-13083"></a>}
<a name="line-13084"></a>output_arg {
<a name="line-13085"></a>  description: "Input strings encoded in base64."
<a name="line-13086"></a>  name: "output"
<a name="line-13087"></a>  type: DT_STRING
<a name="line-13088"></a>}
<a name="line-13089"></a>-}</span>
<a name="line-13090"></a>
<a name="line-13091"></a><a name="stringJoin"></a><span class='hs-comment'>-- | Joins the strings in the given list of string tensors into one tensor;</span>
<a name="line-13092"></a><span class='hs-comment'>--</span>
<a name="line-13093"></a><span class='hs-comment'>-- with the given separator (default is an empty separator).</span>
<a name="line-13094"></a><span class='hs-definition'>stringJoin</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __inputs__: A list of string tensors.  The tensors must all have the same shape,</span>
<a name="line-13095"></a>                                                     <span class='hs-comment'>-- or be scalars.  Scalars may be mixed in; these will be broadcast to the shape</span>
<a name="line-13096"></a>                                                     <span class='hs-comment'>-- of non-scalar inputs.</span>
<a name="line-13097"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-13098"></a><span class='hs-definition'>stringJoin</span> <span class='hs-varid'>inputs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"inputs"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13099"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StringJoin"</span>
<a name="line-13100"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-13101"></a>        <span class='hs-varid'>inputs</span>
<a name="line-13102"></a>  <span class='hs-keyword'>where</span>
<a name="line-13103"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-13104"></a><span class='hs-comment'>{-
<a name="line-13105"></a>attr { has_minimum: true minimum: 1 name: "N" type: "int" }
<a name="line-13106"></a>attr {
<a name="line-13107"></a>  default_value { s: "" }
<a name="line-13108"></a>  description: "string, an optional join separator."
<a name="line-13109"></a>  name: "separator"
<a name="line-13110"></a>  type: "string"
<a name="line-13111"></a>}
<a name="line-13112"></a>input_arg {
<a name="line-13113"></a>  description: "A list of string tensors.  The tensors must all have the same shape,\nor be scalars.  Scalars may be mixed in; these will be broadcast to the shape\nof non-scalar inputs."
<a name="line-13114"></a>  name: "inputs"
<a name="line-13115"></a>  number_attr: "N"
<a name="line-13116"></a>  type: DT_STRING
<a name="line-13117"></a>}
<a name="line-13118"></a>output_arg { name: "output" type: DT_STRING }
<a name="line-13119"></a>-}</span>
<a name="line-13120"></a>
<a name="line-13121"></a><span class='hs-comment'>-- | Computes the gradient of the crop_and_resize op wrt the input image tensor.</span>
<a name="line-13122"></a>
<a name="line-13123"></a><a name="cropAndResizeGradImage"></a><span class='hs-definition'>cropAndResizeGradImage</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-13124"></a>                                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-13125"></a>                                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-13126"></a>                          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.</span>
<a name="line-13127"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor</span>
<a name="line-13128"></a>                                             <span class='hs-comment'>-- specifies the coordinates of a box in the `box_ind[i]` image and is specified</span>
<a name="line-13129"></a>                                             <span class='hs-comment'>-- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of</span>
<a name="line-13130"></a>                                             <span class='hs-comment'>-- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the</span>
<a name="line-13131"></a>                                             <span class='hs-comment'>-- `[0, 1]` interval of normalized image height is mapped to</span>
<a name="line-13132"></a>                                             <span class='hs-comment'>-- `[0, image_height - 1] in image height coordinates. We do allow y1 &gt; y2, in</span>
<a name="line-13133"></a>                                             <span class='hs-comment'>-- which case the sampled crop is an up-down flipped version of the original</span>
<a name="line-13134"></a>                                             <span class='hs-comment'>-- image. The width dimension is treated similarly. Normalized coordinates</span>
<a name="line-13135"></a>                                             <span class='hs-comment'>-- outside the `[0, 1]` range are allowed, in which case we use</span>
<a name="line-13136"></a>                                             <span class='hs-comment'>-- `extrapolation_value` to extrapolate the input image values.</span>
<a name="line-13137"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.</span>
<a name="line-13138"></a>                                                      <span class='hs-comment'>-- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.</span>
<a name="line-13139"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __image_size__: A 1-D tensor with value `[batch, image_height, image_width, depth]`</span>
<a name="line-13140"></a>                                                      <span class='hs-comment'>-- containing the original image size. Both `image_height` and `image_width` need</span>
<a name="line-13141"></a>                                                      <span class='hs-comment'>-- to be positive.</span>
<a name="line-13142"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.</span>
<a name="line-13143"></a><span class='hs-definition'>cropAndResizeGradImage</span> <span class='hs-varid'>grads</span> <span class='hs-varid'>boxes</span> <span class='hs-varid'>box_ind</span> <span class='hs-varid'>image_size</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13144"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CropAndResizeGradImage"</span>
<a name="line-13145"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-13146"></a>        <span class='hs-varid'>grads</span> <span class='hs-varid'>boxes</span> <span class='hs-varid'>box_ind</span> <span class='hs-varid'>image_size</span>
<a name="line-13147"></a><span class='hs-comment'>{-
<a name="line-13148"></a>attr {
<a name="line-13149"></a>  allowed_values {
<a name="line-13150"></a>    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
<a name="line-13151"></a>  }
<a name="line-13152"></a>  name: "T"
<a name="line-13153"></a>  type: "type"
<a name="line-13154"></a>}
<a name="line-13155"></a>attr {
<a name="line-13156"></a>  allowed_values { list { s: "bilinear" } }
<a name="line-13157"></a>  default_value { s: "bilinear" }
<a name="line-13158"></a>  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
<a name="line-13159"></a>  name: "method"
<a name="line-13160"></a>  type: "string"
<a name="line-13161"></a>}
<a name="line-13162"></a>input_arg {
<a name="line-13163"></a>  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
<a name="line-13164"></a>  name: "grads"
<a name="line-13165"></a>  type: DT_FLOAT
<a name="line-13166"></a>}
<a name="line-13167"></a>input_arg {
<a name="line-13168"></a>  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 &gt; y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
<a name="line-13169"></a>  name: "boxes"
<a name="line-13170"></a>  type: DT_FLOAT
<a name="line-13171"></a>}
<a name="line-13172"></a>input_arg {
<a name="line-13173"></a>  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
<a name="line-13174"></a>  name: "box_ind"
<a name="line-13175"></a>  type: DT_INT32
<a name="line-13176"></a>}
<a name="line-13177"></a>input_arg {
<a name="line-13178"></a>  description: "A 1-D tensor with value `[batch, image_height, image_width, depth]`\ncontaining the original image size. Both `image_height` and `image_width` need\nto be positive."
<a name="line-13179"></a>  name: "image_size"
<a name="line-13180"></a>  type: DT_INT32
<a name="line-13181"></a>}
<a name="line-13182"></a>output_arg {
<a name="line-13183"></a>  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`."
<a name="line-13184"></a>  name: "output"
<a name="line-13185"></a>  type_attr: "T"
<a name="line-13186"></a>}
<a name="line-13187"></a>-}</span>
<a name="line-13188"></a>
<a name="line-13189"></a><span class='hs-comment'>-- | Computes hyperbolic tangent of `x` element-wise.</span>
<a name="line-13190"></a>
<a name="line-13191"></a><a name="tanh"></a><span class='hs-definition'>tanh</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-13192"></a>                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-13193"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-13194"></a>                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-13195"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-13196"></a><span class='hs-definition'>tanh</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13197"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Tanh"</span>
<a name="line-13198"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-13199"></a>        <span class='hs-varid'>x</span>
<a name="line-13200"></a><span class='hs-comment'>{-
<a name="line-13201"></a>attr {
<a name="line-13202"></a>  allowed_values {
<a name="line-13203"></a>    list {
<a name="line-13204"></a>      type: DT_HALF
<a name="line-13205"></a>      type: DT_FLOAT
<a name="line-13206"></a>      type: DT_DOUBLE
<a name="line-13207"></a>      type: DT_COMPLEX64
<a name="line-13208"></a>      type: DT_COMPLEX128
<a name="line-13209"></a>    }
<a name="line-13210"></a>  }
<a name="line-13211"></a>  name: "T"
<a name="line-13212"></a>  type: "type"
<a name="line-13213"></a>}
<a name="line-13214"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-13215"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-13216"></a>-}</span>
<a name="line-13217"></a>
<a name="line-13218"></a><a name="asString"></a><span class='hs-comment'>-- | Converts each entry in the given tensor to strings.  Supports many numeric</span>
<a name="line-13219"></a><span class='hs-comment'>--</span>
<a name="line-13220"></a><span class='hs-comment'>-- types and boolean.</span>
<a name="line-13221"></a><span class='hs-definition'>asString</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-13222"></a>                                                 <span class='hs-conid'>Bool</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-13223"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-13224"></a>                                                 <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-13225"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-13226"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-13227"></a><span class='hs-definition'>asString</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13228"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AsString"</span>
<a name="line-13229"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-13230"></a>        <span class='hs-varid'>input</span>
<a name="line-13231"></a><span class='hs-comment'>{-
<a name="line-13232"></a>attr {
<a name="line-13233"></a>  allowed_values {
<a name="line-13234"></a>    list {
<a name="line-13235"></a>      type: DT_INT32
<a name="line-13236"></a>      type: DT_INT64
<a name="line-13237"></a>      type: DT_COMPLEX64
<a name="line-13238"></a>      type: DT_FLOAT
<a name="line-13239"></a>      type: DT_DOUBLE
<a name="line-13240"></a>      type: DT_BOOL
<a name="line-13241"></a>      type: DT_INT8
<a name="line-13242"></a>    }
<a name="line-13243"></a>  }
<a name="line-13244"></a>  name: "T"
<a name="line-13245"></a>  type: "type"
<a name="line-13246"></a>}
<a name="line-13247"></a>attr {
<a name="line-13248"></a>  default_value { i: -1 }
<a name="line-13249"></a>  description: "The post-decimal precision to use for floating point numbers.\nOnly used if precision &gt; -1."
<a name="line-13250"></a>  name: "precision"
<a name="line-13251"></a>  type: "int"
<a name="line-13252"></a>}
<a name="line-13253"></a>attr {
<a name="line-13254"></a>  default_value { b: false }
<a name="line-13255"></a>  description: "Use scientific notation for floating point numbers."
<a name="line-13256"></a>  name: "scientific"
<a name="line-13257"></a>  type: "bool"
<a name="line-13258"></a>}
<a name="line-13259"></a>attr {
<a name="line-13260"></a>  default_value { b: false }
<a name="line-13261"></a>  description: "Use shortest representation (either scientific or standard) for\nfloating point numbers."
<a name="line-13262"></a>  name: "shortest"
<a name="line-13263"></a>  type: "bool"
<a name="line-13264"></a>}
<a name="line-13265"></a>attr {
<a name="line-13266"></a>  default_value { i: -1 }
<a name="line-13267"></a>  description: "Pad pre-decimal numbers to this width.\nApplies to both floating point and integer numbers.\nOnly used if width &gt; -1."
<a name="line-13268"></a>  name: "width"
<a name="line-13269"></a>  type: "int"
<a name="line-13270"></a>}
<a name="line-13271"></a>attr {
<a name="line-13272"></a>  default_value { s: "" }
<a name="line-13273"></a>  description: "The value to pad if width &gt; -1.  If empty, pads with spaces.\nAnother typical value is \'0\'.  String cannot be longer than 1 character."
<a name="line-13274"></a>  name: "fill"
<a name="line-13275"></a>  type: "string"
<a name="line-13276"></a>}
<a name="line-13277"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-13278"></a>output_arg { name: "output" type: DT_STRING }
<a name="line-13279"></a>-}</span>
<a name="line-13280"></a>
<a name="line-13281"></a><a name="iFFT2D"></a><span class='hs-comment'>-- | Compute the inverse 2-dimensional discrete Fourier Transform over the inner-most</span>
<a name="line-13282"></a><span class='hs-comment'>--</span>
<a name="line-13283"></a><span class='hs-comment'>-- 2 dimensions of `input`.</span>
<a name="line-13284"></a><span class='hs-definition'>iFFT2D</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__: A complex64 tensor.</span>
<a name="line-13285"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2</span>
<a name="line-13286"></a>          <span class='hs-comment'>--   dimensions of `input` are replaced with their inverse 2D Fourier Transform.</span>
<a name="line-13287"></a>          <span class='hs-comment'>-- </span>
<a name="line-13288"></a>          <span class='hs-comment'>-- @compatibility(numpy)</span>
<a name="line-13289"></a>          <span class='hs-comment'>-- Equivalent to np.ifft2</span>
<a name="line-13290"></a>          <span class='hs-comment'>-- @end_compatibility</span>
<a name="line-13291"></a><span class='hs-definition'>iFFT2D</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13292"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"IFFT2D"</span><span class='hs-layout'>)</span>
<a name="line-13293"></a>        <span class='hs-varid'>input</span>
<a name="line-13294"></a><span class='hs-comment'>{-
<a name="line-13295"></a>input_arg {
<a name="line-13296"></a>  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
<a name="line-13297"></a>}
<a name="line-13298"></a>output_arg {
<a name="line-13299"></a>  description: "A complex64 tensor of the same shape as `input`. The inner-most 2\n  dimensions of `input` are replaced with their inverse 2D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.ifft2\n@end_compatibility"
<a name="line-13300"></a>  name: "output"
<a name="line-13301"></a>  type: DT_COMPLEX64
<a name="line-13302"></a>}
<a name="line-13303"></a>-}</span>
<a name="line-13304"></a>
<a name="line-13305"></a><a name="sparseConcat"></a><span class='hs-comment'>-- | Concatenates a list of `SparseTensor` along the specified dimension.</span>
<a name="line-13306"></a><span class='hs-comment'>--</span>
<a name="line-13307"></a><span class='hs-comment'>-- Concatenation is with respect to the dense versions of these sparse tensors.</span>
<a name="line-13308"></a><span class='hs-comment'>-- It is assumed that each input is a `SparseTensor` whose elements are ordered</span>
<a name="line-13309"></a><span class='hs-comment'>-- along increasing dimension number.</span>
<a name="line-13310"></a><span class='hs-comment'>-- </span>
<a name="line-13311"></a><span class='hs-comment'>-- All inputs' shapes must match, except for the concat dimension.  The</span>
<a name="line-13312"></a><span class='hs-comment'>-- `indices`, `values`, and `shapes` lists must have the same length.</span>
<a name="line-13313"></a><span class='hs-comment'>-- </span>
<a name="line-13314"></a><span class='hs-comment'>-- The output shape is identical to the inputs', except along the concat</span>
<a name="line-13315"></a><span class='hs-comment'>-- dimension, where it is the sum of the inputs' sizes along that dimension.</span>
<a name="line-13316"></a><span class='hs-comment'>-- </span>
<a name="line-13317"></a><span class='hs-comment'>-- The output elements will be resorted to preserve the sort order along</span>
<a name="line-13318"></a><span class='hs-comment'>-- increasing dimension number.</span>
<a name="line-13319"></a><span class='hs-comment'>-- </span>
<a name="line-13320"></a><span class='hs-comment'>-- This op runs in `O(M log M)` time, where `M` is the total number of non-empty</span>
<a name="line-13321"></a><span class='hs-comment'>-- values across all inputs. This is due to the need for an internal sort in</span>
<a name="line-13322"></a><span class='hs-comment'>-- order to concatenate efficiently across an arbitrary dimension.</span>
<a name="line-13323"></a><span class='hs-comment'>-- </span>
<a name="line-13324"></a><span class='hs-comment'>-- For example, if `concat_dim = 1` and the inputs are</span>
<a name="line-13325"></a><span class='hs-comment'>-- </span>
<a name="line-13326"></a><span class='hs-comment'>--     sp_inputs[0]: shape = [2, 3]</span>
<a name="line-13327"></a><span class='hs-comment'>--     [0, 2]: "a"</span>
<a name="line-13328"></a><span class='hs-comment'>--     [1, 0]: "b"</span>
<a name="line-13329"></a><span class='hs-comment'>--     [1, 1]: "c"</span>
<a name="line-13330"></a><span class='hs-comment'>-- </span>
<a name="line-13331"></a><span class='hs-comment'>--     sp_inputs[1]: shape = [2, 4]</span>
<a name="line-13332"></a><span class='hs-comment'>--     [0, 1]: "d"</span>
<a name="line-13333"></a><span class='hs-comment'>--     [0, 2]: "e"</span>
<a name="line-13334"></a><span class='hs-comment'>-- </span>
<a name="line-13335"></a><span class='hs-comment'>-- then the output will be</span>
<a name="line-13336"></a><span class='hs-comment'>-- </span>
<a name="line-13337"></a><span class='hs-comment'>--     shape = [2, 7]</span>
<a name="line-13338"></a><span class='hs-comment'>--     [0, 2]: "a"</span>
<a name="line-13339"></a><span class='hs-comment'>--     [0, 4]: "d"</span>
<a name="line-13340"></a><span class='hs-comment'>--     [0, 5]: "e"</span>
<a name="line-13341"></a><span class='hs-comment'>--     [1, 0]: "b"</span>
<a name="line-13342"></a><span class='hs-comment'>--     [1, 1]: "c"</span>
<a name="line-13343"></a><span class='hs-comment'>-- </span>
<a name="line-13344"></a><span class='hs-comment'>-- Graphically this is equivalent to doing</span>
<a name="line-13345"></a><span class='hs-comment'>-- </span>
<a name="line-13346"></a><span class='hs-comment'>--     [    a] concat [  d e  ] = [    a   d e  ]</span>
<a name="line-13347"></a><span class='hs-comment'>--     [b c  ]        [       ]   [b c          ]</span>
<a name="line-13348"></a><span class='hs-definition'>sparseConcat</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-13349"></a>                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __concat_dim__: Dimension to concatenate along. Must be in range [-rank, rank),</span>
<a name="line-13350"></a>                               <span class='hs-comment'>-- where rank is the number of dimensions in each input `SparseTensor`.</span>
<a name="line-13351"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __indices__: 2-D.  Indices of each input `SparseTensor`.</span>
<a name="line-13352"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __values__: 1-D.  Non-empty values of each `SparseTensor`.</span>
<a name="line-13353"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __shapes__: 1-D.  Shapes of each `SparseTensor`.</span>
<a name="line-13354"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-13355"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span>
<a name="line-13356"></a>                <span class='hs-comment'>-- ^ (__output_indices__, __output_values__, __output_shape__)</span>
<a name="line-13357"></a>                <span class='hs-comment'>--</span>
<a name="line-13358"></a>                <span class='hs-comment'>-- * __output_indices__: 2-D.  Indices of the concatenated `SparseTensor`.</span>
<a name="line-13359"></a>                <span class='hs-comment'>--</span>
<a name="line-13360"></a>                <span class='hs-comment'>-- * __output_values__: 1-D.  Non-empty values of the concatenated `SparseTensor`.</span>
<a name="line-13361"></a>                <span class='hs-comment'>--</span>
<a name="line-13362"></a>                <span class='hs-comment'>-- * __output_shape__: 1-D.  Shape of the concatenated `SparseTensor`.</span>
<a name="line-13363"></a><span class='hs-definition'>sparseConcat</span> <span class='hs-varid'>concat_dim</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>values</span>
<a name="line-13364"></a>             <span class='hs-varid'>shapes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"indices"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>indices</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-13365"></a>                                            <span class='hs-layout'>(</span><span class='hs-str'>"values"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>values</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-13366"></a>                                            <span class='hs-layout'>(</span><span class='hs-str'>"shapes"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>shapes</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13367"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseConcat"</span>
<a name="line-13368"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-13369"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"concat_dim"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>concat_dim</span>
<a name="line-13370"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-13371"></a>        <span class='hs-varid'>indices</span> <span class='hs-varid'>values</span> <span class='hs-varid'>shapes</span>
<a name="line-13372"></a>  <span class='hs-keyword'>where</span>
<a name="line-13373"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>indices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-13374"></a><span class='hs-comment'>{-
<a name="line-13375"></a>attr {
<a name="line-13376"></a>  description: "Dimension to concatenate along. Must be in range [-rank, rank),\nwhere rank is the number of dimensions in each input `SparseTensor`."
<a name="line-13377"></a>  name: "concat_dim"
<a name="line-13378"></a>  type: "int"
<a name="line-13379"></a>}
<a name="line-13380"></a>attr { has_minimum: true minimum: 2 name: "N" type: "int" }
<a name="line-13381"></a>attr { name: "T" type: "type" }
<a name="line-13382"></a>input_arg {
<a name="line-13383"></a>  description: "2-D.  Indices of each input `SparseTensor`."
<a name="line-13384"></a>  name: "indices"
<a name="line-13385"></a>  number_attr: "N"
<a name="line-13386"></a>  type: DT_INT64
<a name="line-13387"></a>}
<a name="line-13388"></a>input_arg {
<a name="line-13389"></a>  description: "1-D.  Non-empty values of each `SparseTensor`."
<a name="line-13390"></a>  name: "values"
<a name="line-13391"></a>  number_attr: "N"
<a name="line-13392"></a>  type_attr: "T"
<a name="line-13393"></a>}
<a name="line-13394"></a>input_arg {
<a name="line-13395"></a>  description: "1-D.  Shapes of each `SparseTensor`."
<a name="line-13396"></a>  name: "shapes"
<a name="line-13397"></a>  number_attr: "N"
<a name="line-13398"></a>  type: DT_INT64
<a name="line-13399"></a>}
<a name="line-13400"></a>output_arg {
<a name="line-13401"></a>  description: "2-D.  Indices of the concatenated `SparseTensor`."
<a name="line-13402"></a>  name: "output_indices"
<a name="line-13403"></a>  type: DT_INT64
<a name="line-13404"></a>}
<a name="line-13405"></a>output_arg {
<a name="line-13406"></a>  description: "1-D.  Non-empty values of the concatenated `SparseTensor`."
<a name="line-13407"></a>  name: "output_values"
<a name="line-13408"></a>  type_attr: "T"
<a name="line-13409"></a>}
<a name="line-13410"></a>output_arg {
<a name="line-13411"></a>  description: "1-D.  Shape of the concatenated `SparseTensor`."
<a name="line-13412"></a>  name: "output_shape"
<a name="line-13413"></a>  type: DT_INT64
<a name="line-13414"></a>}
<a name="line-13415"></a>-}</span>
<a name="line-13416"></a>
<a name="line-13417"></a><span class='hs-comment'>-- | Generate a glob pattern matching all sharded file names.</span>
<a name="line-13418"></a>
<a name="line-13419"></a><a name="shardedFilespec"></a><span class='hs-definition'>shardedFilespec</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __basename__</span>
<a name="line-13420"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __num_shards__</span>
<a name="line-13421"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __filename__</span>
<a name="line-13422"></a><span class='hs-definition'>shardedFilespec</span> <span class='hs-varid'>basename</span> <span class='hs-varid'>num_shards</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13423"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ShardedFilespec"</span><span class='hs-layout'>)</span>
<a name="line-13424"></a>        <span class='hs-varid'>basename</span> <span class='hs-varid'>num_shards</span>
<a name="line-13425"></a><span class='hs-comment'>{-
<a name="line-13426"></a>input_arg { name: "basename" type: DT_STRING }
<a name="line-13427"></a>input_arg { name: "num_shards" type: DT_INT32 }
<a name="line-13428"></a>output_arg { name: "filename" type: DT_STRING }
<a name="line-13429"></a>-}</span>
<a name="line-13430"></a>
<a name="line-13431"></a><a name="transpose"></a><span class='hs-comment'>-- | Shuffle dimensions of x according to a permutation.</span>
<a name="line-13432"></a><span class='hs-comment'>--</span>
<a name="line-13433"></a><span class='hs-comment'>-- The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:</span>
<a name="line-13434"></a><span class='hs-comment'>--   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`</span>
<a name="line-13435"></a><span class='hs-definition'>transpose</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tperm</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tperm</span><span class='hs-layout'>,</span>
<a name="line-13436"></a>                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-13437"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tperm</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-13438"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-13439"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tperm</span> <span class='hs-comment'>-- ^ __perm__</span>
<a name="line-13440"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-13441"></a><span class='hs-definition'>transpose</span> <span class='hs-varid'>x</span> <span class='hs-varid'>perm</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13442"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Transpose"</span>
<a name="line-13443"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-13444"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tperm"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tperm</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-13445"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>perm</span>
<a name="line-13446"></a><span class='hs-comment'>{-
<a name="line-13447"></a>attr { name: "T" type: "type" }
<a name="line-13448"></a>attr {
<a name="line-13449"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-13450"></a>  default_value { type: DT_INT32 }
<a name="line-13451"></a>  name: "Tperm"
<a name="line-13452"></a>  type: "type"
<a name="line-13453"></a>}
<a name="line-13454"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-13455"></a>input_arg { name: "perm" type_attr: "Tperm" }
<a name="line-13456"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-13457"></a>-}</span>
<a name="line-13458"></a>
<a name="line-13459"></a><a name="reduceJoin"></a><span class='hs-comment'>-- | Joins a string Tensor across the given dimensions.</span>
<a name="line-13460"></a><span class='hs-comment'>--</span>
<a name="line-13461"></a><span class='hs-comment'>-- Computes the string join across dimensions in the given string Tensor of shape</span>
<a name="line-13462"></a><span class='hs-comment'>-- `[d_0, d_1, ..., d_n-1]`.  Returns a new Tensor created by joining the input</span>
<a name="line-13463"></a><span class='hs-comment'>-- strings with the given separator (default: empty string).  Negative indices are</span>
<a name="line-13464"></a><span class='hs-comment'>-- counted backwards from the end, with `-1` being equivalent to `n - 1`.  Passing</span>
<a name="line-13465"></a><span class='hs-comment'>-- an empty `reduction_indices` joins all strings in linear index order and outputs</span>
<a name="line-13466"></a><span class='hs-comment'>-- a scalar string.</span>
<a name="line-13467"></a><span class='hs-comment'>-- </span>
<a name="line-13468"></a><span class='hs-comment'>-- </span>
<a name="line-13469"></a><span class='hs-comment'>-- For example:</span>
<a name="line-13470"></a><span class='hs-comment'>-- </span>
<a name="line-13471"></a><span class='hs-comment'>-- ```</span>
<a name="line-13472"></a><span class='hs-comment'>-- # tensor `a` is [["a", "b"], ["c", "d"]]</span>
<a name="line-13473"></a><span class='hs-comment'>-- tf.reduce_join(a, 0) ==&gt; ["ac", "bd"]</span>
<a name="line-13474"></a><span class='hs-comment'>-- tf.reduce_join(a, 1) ==&gt; ["ab", "cd"]</span>
<a name="line-13475"></a><span class='hs-comment'>-- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==&gt; ["ac", "bd"]</span>
<a name="line-13476"></a><span class='hs-comment'>-- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==&gt; ["ab", "cd"]</span>
<a name="line-13477"></a><span class='hs-comment'>-- tf.reduce_join(a, 0, keep_dims=True) ==&gt; [["ac", "bd"]]</span>
<a name="line-13478"></a><span class='hs-comment'>-- tf.reduce_join(a, 1, keep_dims=True) ==&gt; [["ab"], ["cd"]]</span>
<a name="line-13479"></a><span class='hs-comment'>-- tf.reduce_join(a, 0, separator=".") ==&gt; ["a.c", "b.d"]</span>
<a name="line-13480"></a><span class='hs-comment'>-- tf.reduce_join(a, [0, 1]) ==&gt; ["acbd"]</span>
<a name="line-13481"></a><span class='hs-comment'>-- tf.reduce_join(a, [1, 0]) ==&gt; ["abcd"]</span>
<a name="line-13482"></a><span class='hs-comment'>-- tf.reduce_join(a, []) ==&gt; ["abcd"]</span>
<a name="line-13483"></a><span class='hs-comment'>-- ```</span>
<a name="line-13484"></a><span class='hs-definition'>reduceJoin</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __inputs__: The input to be joined.  All reduced indices must have non-zero size.</span>
<a name="line-13485"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __reduction_indices__: The dimensions to reduce over.  Dimensions are reduced in the</span>
<a name="line-13486"></a>                                          <span class='hs-comment'>-- order specified.  Omitting `reduction_indices` is equivalent to passing</span>
<a name="line-13487"></a>                                          <span class='hs-comment'>-- `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.</span>
<a name="line-13488"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __output__: Has shape equal to that of the input with reduced dimensions removed or</span>
<a name="line-13489"></a>              <span class='hs-comment'>-- set to `1` depending on `keep_dims`.</span>
<a name="line-13490"></a><span class='hs-definition'>reduceJoin</span> <span class='hs-varid'>inputs</span> <span class='hs-varid'>reduction_indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13491"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReduceJoin"</span><span class='hs-layout'>)</span>
<a name="line-13492"></a>        <span class='hs-varid'>inputs</span> <span class='hs-varid'>reduction_indices</span>
<a name="line-13493"></a><span class='hs-comment'>{-
<a name="line-13494"></a>attr {
<a name="line-13495"></a>  default_value { b: false }
<a name="line-13496"></a>  description: "If `True`, retain reduced dimensions with length `1`."
<a name="line-13497"></a>  name: "keep_dims"
<a name="line-13498"></a>  type: "bool"
<a name="line-13499"></a>}
<a name="line-13500"></a>attr {
<a name="line-13501"></a>  default_value { s: "" }
<a name="line-13502"></a>  description: "The separator to use when joining."
<a name="line-13503"></a>  name: "separator"
<a name="line-13504"></a>  type: "string"
<a name="line-13505"></a>}
<a name="line-13506"></a>input_arg {
<a name="line-13507"></a>  description: "The input to be joined.  All reduced indices must have non-zero size."
<a name="line-13508"></a>  name: "inputs"
<a name="line-13509"></a>  type: DT_STRING
<a name="line-13510"></a>}
<a name="line-13511"></a>input_arg {
<a name="line-13512"></a>  description: "The dimensions to reduce over.  Dimensions are reduced in the\norder specified.  Omitting `reduction_indices` is equivalent to passing\n`[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported."
<a name="line-13513"></a>  name: "reduction_indices"
<a name="line-13514"></a>  type: DT_INT32
<a name="line-13515"></a>}
<a name="line-13516"></a>output_arg {
<a name="line-13517"></a>  description: "Has shape equal to that of the input with reduced dimensions removed or\nset to `1` depending on `keep_dims`."
<a name="line-13518"></a>  name: "output"
<a name="line-13519"></a>  type: DT_STRING
<a name="line-13520"></a>}
<a name="line-13521"></a>-}</span>
<a name="line-13522"></a>
<a name="line-13523"></a><a name="stringToHashBucket"></a><span class='hs-comment'>-- | Converts each string in the input Tensor to its hash mod by a number of buckets.</span>
<a name="line-13524"></a><span class='hs-comment'>--</span>
<a name="line-13525"></a><span class='hs-comment'>-- The hash function is deterministic on the content of the string within the</span>
<a name="line-13526"></a><span class='hs-comment'>-- process.</span>
<a name="line-13527"></a><span class='hs-comment'>-- </span>
<a name="line-13528"></a><span class='hs-comment'>-- Note that the hash function may change from time to time.</span>
<a name="line-13529"></a><span class='hs-comment'>-- This functionality will be deprecated and it's recommended to use</span>
<a name="line-13530"></a><span class='hs-comment'>-- `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.</span>
<a name="line-13531"></a><span class='hs-definition'>stringToHashBucket</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_buckets__: The number of buckets.</span>
<a name="line-13532"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __string_tensor__</span>
<a name="line-13533"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __output__: A Tensor of the same shape as the input `string_tensor`.</span>
<a name="line-13534"></a><span class='hs-definition'>stringToHashBucket</span> <span class='hs-varid'>num_buckets</span> <span class='hs-varid'>string_tensor</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13535"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StringToHashBucket"</span>
<a name="line-13536"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_buckets"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_buckets</span><span class='hs-layout'>)</span>
<a name="line-13537"></a>        <span class='hs-varid'>string_tensor</span>
<a name="line-13538"></a><span class='hs-comment'>{-
<a name="line-13539"></a>attr {
<a name="line-13540"></a>  description: "The number of buckets."
<a name="line-13541"></a>  has_minimum: true
<a name="line-13542"></a>  minimum: 1
<a name="line-13543"></a>  name: "num_buckets"
<a name="line-13544"></a>  type: "int"
<a name="line-13545"></a>}
<a name="line-13546"></a>input_arg { name: "string_tensor" type: DT_STRING }
<a name="line-13547"></a>output_arg {
<a name="line-13548"></a>  description: "A Tensor of the same shape as the input `string_tensor`."
<a name="line-13549"></a>  name: "output"
<a name="line-13550"></a>  type: DT_INT64
<a name="line-13551"></a>}
<a name="line-13552"></a>-}</span>
<a name="line-13553"></a>
<a name="line-13554"></a><span class='hs-comment'>-- | Draws samples from a multinomial distribution.</span>
<a name="line-13555"></a>
<a name="line-13556"></a><a name="multinomial"></a><span class='hs-definition'>multinomial</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-13557"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-13558"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-13559"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-13560"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-13561"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-13562"></a>                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-13563"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __logits__: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`</span>
<a name="line-13564"></a>                           <span class='hs-comment'>-- represents the unnormalized log probabilities for all classes.</span>
<a name="line-13565"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __num_samples__: 0-D.  Number of independent samples to draw for each row slice.</span>
<a name="line-13566"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`</span>
<a name="line-13567"></a>               <span class='hs-comment'>-- contains the drawn class labels with range `[0, num_classes)`.</span>
<a name="line-13568"></a><span class='hs-definition'>multinomial</span> <span class='hs-varid'>logits</span> <span class='hs-varid'>num_samples</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13569"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Multinomial"</span>
<a name="line-13570"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-13571"></a>        <span class='hs-varid'>logits</span> <span class='hs-varid'>num_samples</span>
<a name="line-13572"></a><span class='hs-comment'>{-
<a name="line-13573"></a>attr {
<a name="line-13574"></a>  default_value { i: 0 }
<a name="line-13575"></a>  description: "If either seed or seed2 is set to be non-zero, the internal random number\ngenerator is seeded by the given seed.  Otherwise, a random seed is used."
<a name="line-13576"></a>  name: "seed"
<a name="line-13577"></a>  type: "int"
<a name="line-13578"></a>}
<a name="line-13579"></a>attr {
<a name="line-13580"></a>  default_value { i: 0 }
<a name="line-13581"></a>  description: "A second seed to avoid seed collision."
<a name="line-13582"></a>  name: "seed2"
<a name="line-13583"></a>  type: "int"
<a name="line-13584"></a>}
<a name="line-13585"></a>attr {
<a name="line-13586"></a>  allowed_values {
<a name="line-13587"></a>    list {
<a name="line-13588"></a>      type: DT_FLOAT
<a name="line-13589"></a>      type: DT_DOUBLE
<a name="line-13590"></a>      type: DT_INT32
<a name="line-13591"></a>      type: DT_INT64
<a name="line-13592"></a>      type: DT_UINT8
<a name="line-13593"></a>      type: DT_INT16
<a name="line-13594"></a>      type: DT_INT8
<a name="line-13595"></a>      type: DT_UINT16
<a name="line-13596"></a>      type: DT_HALF
<a name="line-13597"></a>    }
<a name="line-13598"></a>  }
<a name="line-13599"></a>  name: "T"
<a name="line-13600"></a>  type: "type"
<a name="line-13601"></a>}
<a name="line-13602"></a>input_arg {
<a name="line-13603"></a>  description: "2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`\nrepresents the unnormalized log probabilities for all classes."
<a name="line-13604"></a>  name: "logits"
<a name="line-13605"></a>  type_attr: "T"
<a name="line-13606"></a>}
<a name="line-13607"></a>input_arg {
<a name="line-13608"></a>  description: "0-D.  Number of independent samples to draw for each row slice."
<a name="line-13609"></a>  name: "num_samples"
<a name="line-13610"></a>  type: DT_INT32
<a name="line-13611"></a>}
<a name="line-13612"></a>output_arg {
<a name="line-13613"></a>  description: "2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`\ncontains the drawn class labels with range `[0, num_classes)`."
<a name="line-13614"></a>  name: "output"
<a name="line-13615"></a>  type: DT_INT64
<a name="line-13616"></a>}
<a name="line-13617"></a>-}</span>
<a name="line-13618"></a>
<a name="line-13619"></a><a name="stringToHashBucketStrong"></a><span class='hs-comment'>-- | Converts each string in the input Tensor to its hash mod by a number of buckets.</span>
<a name="line-13620"></a><span class='hs-comment'>--</span>
<a name="line-13621"></a><span class='hs-comment'>-- The hash function is deterministic on the content of the string within the</span>
<a name="line-13622"></a><span class='hs-comment'>-- process. The hash function is a keyed hash function, where attribute `key`</span>
<a name="line-13623"></a><span class='hs-comment'>-- defines the key of the hash function. `key` is an array of 2 elements.</span>
<a name="line-13624"></a><span class='hs-comment'>-- </span>
<a name="line-13625"></a><span class='hs-comment'>-- A strong hash is important when inputs may be malicious, e.g. URLs with</span>
<a name="line-13626"></a><span class='hs-comment'>-- additional components. Adversaries could try to make their inputs hash to the</span>
<a name="line-13627"></a><span class='hs-comment'>-- same bucket for a denial-of-service attack or to skew the results. A strong</span>
<a name="line-13628"></a><span class='hs-comment'>-- hash prevents this by making it dificult, if not infeasible, to compute inputs</span>
<a name="line-13629"></a><span class='hs-comment'>-- that hash to the same bucket. This comes at a cost of roughly 4x higher compute</span>
<a name="line-13630"></a><span class='hs-comment'>-- time than `tf.string_to_hash_bucket_fast`.</span>
<a name="line-13631"></a><span class='hs-definition'>stringToHashBucketStrong</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_buckets__: The number of buckets.</span>
<a name="line-13632"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __input__: The strings to assign a hash bucket.</span>
<a name="line-13633"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __output__: A Tensor of the same shape as the input `string_tensor`.</span>
<a name="line-13634"></a><span class='hs-definition'>stringToHashBucketStrong</span> <span class='hs-varid'>num_buckets</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13635"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StringToHashBucketStrong"</span>
<a name="line-13636"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_buckets"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_buckets</span><span class='hs-layout'>)</span>
<a name="line-13637"></a>        <span class='hs-varid'>input</span>
<a name="line-13638"></a><span class='hs-comment'>{-
<a name="line-13639"></a>attr {
<a name="line-13640"></a>  description: "The number of buckets."
<a name="line-13641"></a>  has_minimum: true
<a name="line-13642"></a>  minimum: 1
<a name="line-13643"></a>  name: "num_buckets"
<a name="line-13644"></a>  type: "int"
<a name="line-13645"></a>}
<a name="line-13646"></a>attr {
<a name="line-13647"></a>  description: "The key for the keyed hash function passed as a list of two uint64\nelements."
<a name="line-13648"></a>  name: "key"
<a name="line-13649"></a>  type: "list(int)"
<a name="line-13650"></a>}
<a name="line-13651"></a>input_arg {
<a name="line-13652"></a>  description: "The strings to assign a hash bucket."
<a name="line-13653"></a>  name: "input"
<a name="line-13654"></a>  type: DT_STRING
<a name="line-13655"></a>}
<a name="line-13656"></a>output_arg {
<a name="line-13657"></a>  description: "A Tensor of the same shape as the input `string_tensor`."
<a name="line-13658"></a>  name: "output"
<a name="line-13659"></a>  type: DT_INT64
<a name="line-13660"></a>}
<a name="line-13661"></a>-}</span>
<a name="line-13662"></a>
<a name="line-13663"></a><a name="scatterNdUpdate"></a><span class='hs-comment'>-- | Applies sparse `updates` to individual values or slices within a given</span>
<a name="line-13664"></a><span class='hs-comment'>--</span>
<a name="line-13665"></a><span class='hs-comment'>-- variable according to `indices`.</span>
<a name="line-13666"></a><span class='hs-comment'>-- </span>
<a name="line-13667"></a><span class='hs-comment'>-- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.</span>
<a name="line-13668"></a><span class='hs-comment'>-- </span>
<a name="line-13669"></a><span class='hs-comment'>-- `indices` must be integer tensor, containing indices into `ref`.</span>
<a name="line-13670"></a><span class='hs-comment'>-- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 &lt; K &lt;= P`.</span>
<a name="line-13671"></a><span class='hs-comment'>-- </span>
<a name="line-13672"></a><span class='hs-comment'>-- The innermost dimension of `indices` (with length `K`) corresponds to</span>
<a name="line-13673"></a><span class='hs-comment'>-- indices into elements (if `K = P`) or slices (if `K &lt; P`) along the `K`th</span>
<a name="line-13674"></a><span class='hs-comment'>-- dimension of `ref`.</span>
<a name="line-13675"></a><span class='hs-comment'>-- </span>
<a name="line-13676"></a><span class='hs-comment'>-- `updates` is `Tensor` of rank `Q-1+P-K` with shape:</span>
<a name="line-13677"></a><span class='hs-comment'>-- </span>
<a name="line-13678"></a><span class='hs-comment'>-- ```</span>
<a name="line-13679"></a><span class='hs-comment'>-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].</span>
<a name="line-13680"></a><span class='hs-comment'>-- ```</span>
<a name="line-13681"></a><span class='hs-comment'>-- </span>
<a name="line-13682"></a><span class='hs-comment'>-- For example, say we want to update 4 scattered elements to a rank-1 tensor to</span>
<a name="line-13683"></a><span class='hs-comment'>-- 8 elements. In Python, that update would look like this:</span>
<a name="line-13684"></a><span class='hs-comment'>-- </span>
<a name="line-13685"></a><span class='hs-comment'>--     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])</span>
<a name="line-13686"></a><span class='hs-comment'>--     indices = tf.constant([[4], [3], [1] ,[7]])</span>
<a name="line-13687"></a><span class='hs-comment'>--     updates = tf.constant([9, 10, 11, 12])</span>
<a name="line-13688"></a><span class='hs-comment'>--     update = tf.scatter_nd_update(ref, indices, updates)</span>
<a name="line-13689"></a><span class='hs-comment'>--     with tf.Session() as sess:</span>
<a name="line-13690"></a><span class='hs-comment'>--       print sess.run(update)</span>
<a name="line-13691"></a><span class='hs-comment'>-- </span>
<a name="line-13692"></a><span class='hs-comment'>-- The resulting update to ref would look like this:</span>
<a name="line-13693"></a><span class='hs-comment'>-- </span>
<a name="line-13694"></a><span class='hs-comment'>--     [1, 11, 3, 10, 9, 6, 7, 12]</span>
<a name="line-13695"></a><span class='hs-comment'>-- </span>
<a name="line-13696"></a><span class='hs-comment'>-- See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to</span>
<a name="line-13697"></a><span class='hs-comment'>-- slices.</span>
<a name="line-13698"></a><span class='hs-definition'>scatterNdUpdate</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-13699"></a>                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-13700"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-13701"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: A mutable Tensor. Should be from a Variable node.</span>
<a name="line-13702"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.</span>
<a name="line-13703"></a>                                         <span class='hs-comment'>-- A tensor of indices into ref.</span>
<a name="line-13704"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated</span>
<a name="line-13705"></a>                                  <span class='hs-comment'>-- values to add to ref.</span>
<a name="line-13706"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want to</span>
<a name="line-13707"></a>                   <span class='hs-comment'>-- use the updated values after the update is done.</span>
<a name="line-13708"></a><span class='hs-definition'>scatterNdUpdate</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13709"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ScatterNdUpdate"</span>
<a name="line-13710"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-13711"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-13712"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span>
<a name="line-13713"></a><span class='hs-comment'>{-
<a name="line-13714"></a>attr { name: "T" type: "type" }
<a name="line-13715"></a>attr {
<a name="line-13716"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-13717"></a>  name: "Tindices"
<a name="line-13718"></a>  type: "type"
<a name="line-13719"></a>}
<a name="line-13720"></a>attr {
<a name="line-13721"></a>  default_value { b: true }
<a name="line-13722"></a>  description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention."
<a name="line-13723"></a>  name: "use_locking"
<a name="line-13724"></a>  type: "bool"
<a name="line-13725"></a>}
<a name="line-13726"></a>input_arg {
<a name="line-13727"></a>  description: "A mutable Tensor. Should be from a Variable node."
<a name="line-13728"></a>  is_ref: true
<a name="line-13729"></a>  name: "ref"
<a name="line-13730"></a>  type_attr: "T"
<a name="line-13731"></a>}
<a name="line-13732"></a>input_arg {
<a name="line-13733"></a>  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
<a name="line-13734"></a>  name: "indices"
<a name="line-13735"></a>  type_attr: "Tindices"
<a name="line-13736"></a>}
<a name="line-13737"></a>input_arg {
<a name="line-13738"></a>  description: "A Tensor. Must have the same type as ref. A tensor of updated\nvalues to add to ref."
<a name="line-13739"></a>  name: "updates"
<a name="line-13740"></a>  type_attr: "T"
<a name="line-13741"></a>}
<a name="line-13742"></a>output_arg {
<a name="line-13743"></a>  description: "Same as ref. Returned as a convenience for operations that want to\nuse the updated values after the update is done."
<a name="line-13744"></a>  is_ref: true
<a name="line-13745"></a>  name: "output_ref"
<a name="line-13746"></a>  type_attr: "T"
<a name="line-13747"></a>}
<a name="line-13748"></a>-}</span>
<a name="line-13749"></a>
<a name="line-13750"></a><span class='hs-comment'>-- | Compute gradients for a FakeQuantWithMinMaxVars operation.</span>
<a name="line-13751"></a>
<a name="line-13752"></a><a name="fakeQuantWithMinMaxVarsGradient"></a><span class='hs-definition'>fakeQuantWithMinMaxVarsGradient</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.</span>
<a name="line-13753"></a>                                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation.</span>
<a name="line-13754"></a>                                                      <span class='hs-comment'>-- min, max: Quantization interval, scalar floats.</span>
<a name="line-13755"></a>                                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min__</span>
<a name="line-13756"></a>                                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max__</span>
<a name="line-13757"></a>                                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-13758"></a>                                       <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-13759"></a>                                   <span class='hs-comment'>-- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__)</span>
<a name="line-13760"></a>                                   <span class='hs-comment'>--</span>
<a name="line-13761"></a>                                   <span class='hs-comment'>-- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs:</span>
<a name="line-13762"></a>                                   <span class='hs-comment'>-- `gradients * (inputs &gt;= min &amp;&amp; inputs &lt;= max)`.</span>
<a name="line-13763"></a>                                   <span class='hs-comment'>--</span>
<a name="line-13764"></a>                                   <span class='hs-comment'>-- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter:</span>
<a name="line-13765"></a>                                   <span class='hs-comment'>-- `sum(gradients * (inputs &lt; min))`.</span>
<a name="line-13766"></a>                                   <span class='hs-comment'>--</span>
<a name="line-13767"></a>                                   <span class='hs-comment'>-- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter:</span>
<a name="line-13768"></a>                                   <span class='hs-comment'>-- `sum(gradients * (inputs &gt; max))`.</span>
<a name="line-13769"></a><span class='hs-definition'>fakeQuantWithMinMaxVarsGradient</span> <span class='hs-varid'>gradients</span> <span class='hs-varid'>inputs</span> <span class='hs-varid'>min</span> <span class='hs-varid'>max</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13770"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FakeQuantWithMinMaxVarsGradient"</span><span class='hs-layout'>)</span>
<a name="line-13771"></a>        <span class='hs-varid'>gradients</span> <span class='hs-varid'>inputs</span> <span class='hs-varid'>min</span> <span class='hs-varid'>max</span>
<a name="line-13772"></a><span class='hs-comment'>{-
<a name="line-13773"></a>input_arg {
<a name="line-13774"></a>  description: "Backpropagated gradients above the FakeQuantWithMinMaxVars operation."
<a name="line-13775"></a>  name: "gradients"
<a name="line-13776"></a>  type: DT_FLOAT
<a name="line-13777"></a>}
<a name="line-13778"></a>input_arg {
<a name="line-13779"></a>  description: "Values passed as inputs to the FakeQuantWithMinMaxVars operation.\nmin, max: Quantization interval, scalar floats."
<a name="line-13780"></a>  name: "inputs"
<a name="line-13781"></a>  type: DT_FLOAT
<a name="line-13782"></a>}
<a name="line-13783"></a>input_arg { name: "min" type: DT_FLOAT }
<a name="line-13784"></a>input_arg { name: "max" type: DT_FLOAT }
<a name="line-13785"></a>output_arg {
<a name="line-13786"></a>  description: "Backpropagated gradients w.r.t. inputs:\n`gradients * (inputs &gt;= min &amp;&amp; inputs &lt;= max)`."
<a name="line-13787"></a>  name: "backprops_wrt_input"
<a name="line-13788"></a>  type: DT_FLOAT
<a name="line-13789"></a>}
<a name="line-13790"></a>output_arg {
<a name="line-13791"></a>  description: "Backpropagated gradients w.r.t. min parameter:\n`sum(gradients * (inputs &lt; min))`."
<a name="line-13792"></a>  name: "backprop_wrt_min"
<a name="line-13793"></a>  type: DT_FLOAT
<a name="line-13794"></a>}
<a name="line-13795"></a>output_arg {
<a name="line-13796"></a>  description: "Backpropagated gradients w.r.t. max parameter:\n`sum(gradients * (inputs &gt; max))`."
<a name="line-13797"></a>  name: "backprop_wrt_max"
<a name="line-13798"></a>  type: DT_FLOAT
<a name="line-13799"></a>}
<a name="line-13800"></a>-}</span>
<a name="line-13801"></a>
<a name="line-13802"></a><a name="size"></a><span class='hs-comment'>-- | Returns the size of a tensor.</span>
<a name="line-13803"></a><span class='hs-comment'>--</span>
<a name="line-13804"></a><span class='hs-comment'>-- This operation returns an integer representing the number of elements in</span>
<a name="line-13805"></a><span class='hs-comment'>-- `input`.</span>
<a name="line-13806"></a><span class='hs-comment'>-- </span>
<a name="line-13807"></a><span class='hs-comment'>-- For example:</span>
<a name="line-13808"></a><span class='hs-comment'>-- </span>
<a name="line-13809"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-13810"></a><span class='hs-comment'>-- # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]</span>
<a name="line-13811"></a><span class='hs-comment'>-- size(t) ==&gt; 12</span>
<a name="line-13812"></a><span class='hs-comment'>-- ```</span>
<a name="line-13813"></a><span class='hs-definition'>size</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-13814"></a>                                <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-13815"></a>                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-13816"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-13817"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-13818"></a><span class='hs-definition'>size</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13819"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Size"</span>
<a name="line-13820"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-13821"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-13822"></a>        <span class='hs-varid'>input</span>
<a name="line-13823"></a><span class='hs-comment'>{-
<a name="line-13824"></a>attr { name: "T" type: "type" }
<a name="line-13825"></a>attr {
<a name="line-13826"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-13827"></a>  default_value { type: DT_INT32 }
<a name="line-13828"></a>  name: "out_type"
<a name="line-13829"></a>  type: "type"
<a name="line-13830"></a>}
<a name="line-13831"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-13832"></a>output_arg { name: "output" type_attr: "out_type" }
<a name="line-13833"></a>-}</span>
<a name="line-13834"></a>
<a name="line-13835"></a><a name="scatterDiv"></a><span class='hs-comment'>-- | Divides a variable reference by sparse updates.</span>
<a name="line-13836"></a><span class='hs-comment'>--</span>
<a name="line-13837"></a><span class='hs-comment'>-- This operation computes</span>
<a name="line-13838"></a><span class='hs-comment'>-- </span>
<a name="line-13839"></a><span class='hs-comment'>--     # Scalar indices</span>
<a name="line-13840"></a><span class='hs-comment'>--     ref[indices, ...] /= updates[...]</span>
<a name="line-13841"></a><span class='hs-comment'>-- </span>
<a name="line-13842"></a><span class='hs-comment'>--     # Vector indices (for each i)</span>
<a name="line-13843"></a><span class='hs-comment'>--     ref[indices[i], ...] /= updates[i, ...]</span>
<a name="line-13844"></a><span class='hs-comment'>-- </span>
<a name="line-13845"></a><span class='hs-comment'>--     # High rank indices (for each i, ..., j)</span>
<a name="line-13846"></a><span class='hs-comment'>--     ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]</span>
<a name="line-13847"></a><span class='hs-comment'>-- </span>
<a name="line-13848"></a><span class='hs-comment'>-- This operation outputs `ref` after the update is done.</span>
<a name="line-13849"></a><span class='hs-comment'>-- This makes it easier to chain operations that need to use the reset value.</span>
<a name="line-13850"></a><span class='hs-comment'>-- </span>
<a name="line-13851"></a><span class='hs-comment'>-- Duplicate entries are handled correctly: if multiple `indices` reference</span>
<a name="line-13852"></a><span class='hs-comment'>-- the same location, their contributions divide.</span>
<a name="line-13853"></a><span class='hs-comment'>-- </span>
<a name="line-13854"></a><span class='hs-comment'>-- Requires `updates.shape = indices.shape + ref.shape[1:]`.</span>
<a name="line-13855"></a><span class='hs-definition'>scatterDiv</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-13856"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-13857"></a>                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-13858"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-13859"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-13860"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-13861"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-13862"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-13863"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-13864"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-13865"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: Should be from a `Variable` node.</span>
<a name="line-13866"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A tensor of indices into the first dimension of `ref`.</span>
<a name="line-13867"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __updates__: A tensor of values that `ref` is divided by.</span>
<a name="line-13868"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want</span>
<a name="line-13869"></a>              <span class='hs-comment'>-- to use the updated values after the update is done.</span>
<a name="line-13870"></a><span class='hs-definition'>scatterDiv</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13871"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ScatterDiv"</span>
<a name="line-13872"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-13873"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-13874"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span>
<a name="line-13875"></a><span class='hs-comment'>{-
<a name="line-13876"></a>attr {
<a name="line-13877"></a>  allowed_values {
<a name="line-13878"></a>    list {
<a name="line-13879"></a>      type: DT_FLOAT
<a name="line-13880"></a>      type: DT_DOUBLE
<a name="line-13881"></a>      type: DT_INT64
<a name="line-13882"></a>      type: DT_INT32
<a name="line-13883"></a>      type: DT_UINT8
<a name="line-13884"></a>      type: DT_UINT16
<a name="line-13885"></a>      type: DT_INT16
<a name="line-13886"></a>      type: DT_INT8
<a name="line-13887"></a>      type: DT_COMPLEX64
<a name="line-13888"></a>      type: DT_COMPLEX128
<a name="line-13889"></a>      type: DT_QINT8
<a name="line-13890"></a>      type: DT_QUINT8
<a name="line-13891"></a>      type: DT_QINT32
<a name="line-13892"></a>      type: DT_HALF
<a name="line-13893"></a>    }
<a name="line-13894"></a>  }
<a name="line-13895"></a>  name: "T"
<a name="line-13896"></a>  type: "type"
<a name="line-13897"></a>}
<a name="line-13898"></a>attr {
<a name="line-13899"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-13900"></a>  name: "Tindices"
<a name="line-13901"></a>  type: "type"
<a name="line-13902"></a>}
<a name="line-13903"></a>attr {
<a name="line-13904"></a>  default_value { b: false }
<a name="line-13905"></a>  description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-13906"></a>  name: "use_locking"
<a name="line-13907"></a>  type: "bool"
<a name="line-13908"></a>}
<a name="line-13909"></a>input_arg {
<a name="line-13910"></a>  description: "Should be from a `Variable` node."
<a name="line-13911"></a>  is_ref: true
<a name="line-13912"></a>  name: "ref"
<a name="line-13913"></a>  type_attr: "T"
<a name="line-13914"></a>}
<a name="line-13915"></a>input_arg {
<a name="line-13916"></a>  description: "A tensor of indices into the first dimension of `ref`."
<a name="line-13917"></a>  name: "indices"
<a name="line-13918"></a>  type_attr: "Tindices"
<a name="line-13919"></a>}
<a name="line-13920"></a>input_arg {
<a name="line-13921"></a>  description: "A tensor of values that `ref` is divided by."
<a name="line-13922"></a>  name: "updates"
<a name="line-13923"></a>  type_attr: "T"
<a name="line-13924"></a>}
<a name="line-13925"></a>output_arg {
<a name="line-13926"></a>  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
<a name="line-13927"></a>  is_ref: true
<a name="line-13928"></a>  name: "output_ref"
<a name="line-13929"></a>  type_attr: "T"
<a name="line-13930"></a>}
<a name="line-13931"></a>-}</span>
<a name="line-13932"></a>
<a name="line-13933"></a><a name="scatterMul"></a><span class='hs-comment'>-- | Multiplies sparse updates into a variable reference.</span>
<a name="line-13934"></a><span class='hs-comment'>--</span>
<a name="line-13935"></a><span class='hs-comment'>-- This operation computes</span>
<a name="line-13936"></a><span class='hs-comment'>-- </span>
<a name="line-13937"></a><span class='hs-comment'>--     # Scalar indices</span>
<a name="line-13938"></a><span class='hs-comment'>--     ref[indices, ...] *= updates[...]</span>
<a name="line-13939"></a><span class='hs-comment'>-- </span>
<a name="line-13940"></a><span class='hs-comment'>--     # Vector indices (for each i)</span>
<a name="line-13941"></a><span class='hs-comment'>--     ref[indices[i], ...] *= updates[i, ...]</span>
<a name="line-13942"></a><span class='hs-comment'>-- </span>
<a name="line-13943"></a><span class='hs-comment'>--     # High rank indices (for each i, ..., j)</span>
<a name="line-13944"></a><span class='hs-comment'>--     ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]</span>
<a name="line-13945"></a><span class='hs-comment'>-- </span>
<a name="line-13946"></a><span class='hs-comment'>-- This operation outputs `ref` after the update is done.</span>
<a name="line-13947"></a><span class='hs-comment'>-- This makes it easier to chain operations that need to use the reset value.</span>
<a name="line-13948"></a><span class='hs-comment'>-- </span>
<a name="line-13949"></a><span class='hs-comment'>-- Duplicate entries are handled correctly: if multiple `indices` reference</span>
<a name="line-13950"></a><span class='hs-comment'>-- the same location, their contributions multiply.</span>
<a name="line-13951"></a><span class='hs-comment'>-- </span>
<a name="line-13952"></a><span class='hs-comment'>-- Requires `updates.shape = indices.shape + ref.shape[1:]`.</span>
<a name="line-13953"></a><span class='hs-definition'>scatterMul</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-13954"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-13955"></a>                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-13956"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-13957"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-13958"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-13959"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-13960"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-13961"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-13962"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-13963"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: Should be from a `Variable` node.</span>
<a name="line-13964"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A tensor of indices into the first dimension of `ref`.</span>
<a name="line-13965"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __updates__: A tensor of updated values to multiply to `ref`.</span>
<a name="line-13966"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want</span>
<a name="line-13967"></a>              <span class='hs-comment'>-- to use the updated values after the update is done.</span>
<a name="line-13968"></a><span class='hs-definition'>scatterMul</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-13969"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ScatterMul"</span>
<a name="line-13970"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-13971"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-13972"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span>
<a name="line-13973"></a><span class='hs-comment'>{-
<a name="line-13974"></a>attr {
<a name="line-13975"></a>  allowed_values {
<a name="line-13976"></a>    list {
<a name="line-13977"></a>      type: DT_FLOAT
<a name="line-13978"></a>      type: DT_DOUBLE
<a name="line-13979"></a>      type: DT_INT64
<a name="line-13980"></a>      type: DT_INT32
<a name="line-13981"></a>      type: DT_UINT8
<a name="line-13982"></a>      type: DT_UINT16
<a name="line-13983"></a>      type: DT_INT16
<a name="line-13984"></a>      type: DT_INT8
<a name="line-13985"></a>      type: DT_COMPLEX64
<a name="line-13986"></a>      type: DT_COMPLEX128
<a name="line-13987"></a>      type: DT_QINT8
<a name="line-13988"></a>      type: DT_QUINT8
<a name="line-13989"></a>      type: DT_QINT32
<a name="line-13990"></a>      type: DT_HALF
<a name="line-13991"></a>    }
<a name="line-13992"></a>  }
<a name="line-13993"></a>  name: "T"
<a name="line-13994"></a>  type: "type"
<a name="line-13995"></a>}
<a name="line-13996"></a>attr {
<a name="line-13997"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-13998"></a>  name: "Tindices"
<a name="line-13999"></a>  type: "type"
<a name="line-14000"></a>}
<a name="line-14001"></a>attr {
<a name="line-14002"></a>  default_value { b: false }
<a name="line-14003"></a>  description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-14004"></a>  name: "use_locking"
<a name="line-14005"></a>  type: "bool"
<a name="line-14006"></a>}
<a name="line-14007"></a>input_arg {
<a name="line-14008"></a>  description: "Should be from a `Variable` node."
<a name="line-14009"></a>  is_ref: true
<a name="line-14010"></a>  name: "ref"
<a name="line-14011"></a>  type_attr: "T"
<a name="line-14012"></a>}
<a name="line-14013"></a>input_arg {
<a name="line-14014"></a>  description: "A tensor of indices into the first dimension of `ref`."
<a name="line-14015"></a>  name: "indices"
<a name="line-14016"></a>  type_attr: "Tindices"
<a name="line-14017"></a>}
<a name="line-14018"></a>input_arg {
<a name="line-14019"></a>  description: "A tensor of updated values to multiply to `ref`."
<a name="line-14020"></a>  name: "updates"
<a name="line-14021"></a>  type_attr: "T"
<a name="line-14022"></a>}
<a name="line-14023"></a>output_arg {
<a name="line-14024"></a>  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
<a name="line-14025"></a>  is_ref: true
<a name="line-14026"></a>  name: "output_ref"
<a name="line-14027"></a>  type_attr: "T"
<a name="line-14028"></a>}
<a name="line-14029"></a>-}</span>
<a name="line-14030"></a>
<a name="line-14031"></a><a name="copyHost"></a><span class='hs-comment'>-- | Copy Host Op.</span>
<a name="line-14032"></a><span class='hs-comment'>--</span>
<a name="line-14033"></a><span class='hs-comment'>-- Performs CPU-to-CPU deep-copying of tensor.</span>
<a name="line-14034"></a><span class='hs-comment'>-- </span>
<a name="line-14035"></a><span class='hs-comment'>-- Unlike the Copy Op, this op has HostMemory constraint on its input or output.</span>
<a name="line-14036"></a><span class='hs-definition'>copyHost</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14037"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Input tensor.</span>
<a name="line-14038"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Output tensor, deep-copied from input.</span>
<a name="line-14039"></a><span class='hs-definition'>copyHost</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14040"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CopyHost"</span>
<a name="line-14041"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14042"></a>        <span class='hs-varid'>input</span>
<a name="line-14043"></a><span class='hs-comment'>{-
<a name="line-14044"></a>attr { name: "T" type: "type" }
<a name="line-14045"></a>attr {
<a name="line-14046"></a>  default_value { s: "" }
<a name="line-14047"></a>  description: "The name of the input tensor."
<a name="line-14048"></a>  name: "tensor_name"
<a name="line-14049"></a>  type: "string"
<a name="line-14050"></a>}
<a name="line-14051"></a>input_arg {
<a name="line-14052"></a>  description: "Input tensor." name: "input" type_attr: "T"
<a name="line-14053"></a>}
<a name="line-14054"></a>output_arg {
<a name="line-14055"></a>  description: "Output tensor, deep-copied from input."
<a name="line-14056"></a>  name: "output"
<a name="line-14057"></a>  type_attr: "T"
<a name="line-14058"></a>}
<a name="line-14059"></a>-}</span>
<a name="line-14060"></a>
<a name="line-14061"></a><a name="wholeFileReader"></a><span class='hs-comment'>-- | A Reader that outputs the entire contents of a file as a value.</span>
<a name="line-14062"></a><span class='hs-comment'>--</span>
<a name="line-14063"></a><span class='hs-comment'>-- To use, enqueue filenames in a Queue.  The output of ReaderRead will</span>
<a name="line-14064"></a><span class='hs-comment'>-- be a filename (key) and the contents of that file (value).</span>
<a name="line-14065"></a><span class='hs-definition'>wholeFileReader</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __reader_handle__: The handle to reference the Reader.</span>
<a name="line-14066"></a><span class='hs-definition'>wholeFileReader</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14067"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"WholeFileReader"</span><span class='hs-layout'>)</span>
<a name="line-14068"></a>        
<a name="line-14069"></a><span class='hs-comment'>{-
<a name="line-14070"></a>attr {
<a name="line-14071"></a>  default_value { s: "" }
<a name="line-14072"></a>  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
<a name="line-14073"></a>  name: "container"
<a name="line-14074"></a>  type: "string"
<a name="line-14075"></a>}
<a name="line-14076"></a>attr {
<a name="line-14077"></a>  default_value { s: "" }
<a name="line-14078"></a>  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
<a name="line-14079"></a>  name: "shared_name"
<a name="line-14080"></a>  type: "string"
<a name="line-14081"></a>}
<a name="line-14082"></a>output_arg {
<a name="line-14083"></a>  description: "The handle to reference the Reader."
<a name="line-14084"></a>  is_ref: true
<a name="line-14085"></a>  name: "reader_handle"
<a name="line-14086"></a>  type: DT_STRING
<a name="line-14087"></a>}
<a name="line-14088"></a>-}</span>
<a name="line-14089"></a>
<a name="line-14090"></a><a name="takeManySparseFromTensorsMap"></a><span class='hs-comment'>-- | Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.</span>
<a name="line-14091"></a><span class='hs-comment'>--</span>
<a name="line-14092"></a><span class='hs-comment'>-- The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where</span>
<a name="line-14093"></a><span class='hs-comment'>-- `N` is the minibatch size and the rows correspond to the output handles of</span>
<a name="line-14094"></a><span class='hs-comment'>-- `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`.  The ranks of the</span>
<a name="line-14095"></a><span class='hs-comment'>-- original `SparseTensor` objects that went into the given input ops must all</span>
<a name="line-14096"></a><span class='hs-comment'>-- match.  When the final `SparseTensor` is created, it has rank one</span>
<a name="line-14097"></a><span class='hs-comment'>-- higher than the ranks of the incoming `SparseTensor` objects</span>
<a name="line-14098"></a><span class='hs-comment'>-- (they have been concatenated along a new row dimension on the left).</span>
<a name="line-14099"></a><span class='hs-comment'>-- </span>
<a name="line-14100"></a><span class='hs-comment'>-- The output `SparseTensor` object's shape values for all dimensions but the</span>
<a name="line-14101"></a><span class='hs-comment'>-- first are the max across the input `SparseTensor` objects' shape values</span>
<a name="line-14102"></a><span class='hs-comment'>-- for the corresponding dimensions.  Its first shape value is `N`, the minibatch</span>
<a name="line-14103"></a><span class='hs-comment'>-- size.</span>
<a name="line-14104"></a><span class='hs-comment'>-- </span>
<a name="line-14105"></a><span class='hs-comment'>-- The input `SparseTensor` objects' indices are assumed ordered in</span>
<a name="line-14106"></a><span class='hs-comment'>-- standard lexicographic order.  If this is not the case, after this</span>
<a name="line-14107"></a><span class='hs-comment'>-- step run `SparseReorder` to restore index ordering.</span>
<a name="line-14108"></a><span class='hs-comment'>-- </span>
<a name="line-14109"></a><span class='hs-comment'>-- For example, if the handles represent an input, which is a `[2, 3]` matrix</span>
<a name="line-14110"></a><span class='hs-comment'>-- representing two original `SparseTensor` objects:</span>
<a name="line-14111"></a><span class='hs-comment'>-- </span>
<a name="line-14112"></a><span class='hs-comment'>-- ```</span>
<a name="line-14113"></a><span class='hs-comment'>--     index = [ 0]</span>
<a name="line-14114"></a><span class='hs-comment'>--             [10]</span>
<a name="line-14115"></a><span class='hs-comment'>--             [20]</span>
<a name="line-14116"></a><span class='hs-comment'>--     values = [1, 2, 3]</span>
<a name="line-14117"></a><span class='hs-comment'>--     shape = [50]</span>
<a name="line-14118"></a><span class='hs-comment'>-- ```</span>
<a name="line-14119"></a><span class='hs-comment'>-- </span>
<a name="line-14120"></a><span class='hs-comment'>-- and</span>
<a name="line-14121"></a><span class='hs-comment'>-- </span>
<a name="line-14122"></a><span class='hs-comment'>-- ```</span>
<a name="line-14123"></a><span class='hs-comment'>--     index = [ 2]</span>
<a name="line-14124"></a><span class='hs-comment'>--             [10]</span>
<a name="line-14125"></a><span class='hs-comment'>--     values = [4, 5]</span>
<a name="line-14126"></a><span class='hs-comment'>--     shape = [30]</span>
<a name="line-14127"></a><span class='hs-comment'>-- ```</span>
<a name="line-14128"></a><span class='hs-comment'>-- </span>
<a name="line-14129"></a><span class='hs-comment'>-- then the final `SparseTensor` will be:</span>
<a name="line-14130"></a><span class='hs-comment'>-- </span>
<a name="line-14131"></a><span class='hs-comment'>-- ```</span>
<a name="line-14132"></a><span class='hs-comment'>--     index = [0  0]</span>
<a name="line-14133"></a><span class='hs-comment'>--             [0 10]</span>
<a name="line-14134"></a><span class='hs-comment'>--             [0 20]</span>
<a name="line-14135"></a><span class='hs-comment'>--             [1  2]</span>
<a name="line-14136"></a><span class='hs-comment'>--             [1 10]</span>
<a name="line-14137"></a><span class='hs-comment'>--     values = [1, 2, 3, 4, 5]</span>
<a name="line-14138"></a><span class='hs-comment'>--     shape = [2 50]</span>
<a name="line-14139"></a><span class='hs-comment'>-- ```</span>
<a name="line-14140"></a><span class='hs-definition'>takeManySparseFromTensorsMap</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14141"></a>                                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sparse_handles__: 1-D, The `N` serialized `SparseTensor` objects.</span>
<a name="line-14142"></a>                                                         <span class='hs-comment'>-- Shape: `[N]`.</span>
<a name="line-14143"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-14144"></a>                                           <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-14145"></a>                                           <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14146"></a>                                <span class='hs-comment'>-- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__)</span>
<a name="line-14147"></a>                                <span class='hs-comment'>--</span>
<a name="line-14148"></a>                                <span class='hs-comment'>-- * __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.</span>
<a name="line-14149"></a>                                <span class='hs-comment'>--</span>
<a name="line-14150"></a>                                <span class='hs-comment'>-- * __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.</span>
<a name="line-14151"></a>                                <span class='hs-comment'>--</span>
<a name="line-14152"></a>                                <span class='hs-comment'>-- * __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.</span>
<a name="line-14153"></a><span class='hs-definition'>takeManySparseFromTensorsMap</span> <span class='hs-varid'>sparse_handles</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14154"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TakeManySparseFromTensorsMap"</span>
<a name="line-14155"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14156"></a>        <span class='hs-varid'>sparse_handles</span>
<a name="line-14157"></a><span class='hs-comment'>{-
<a name="line-14158"></a>attr {
<a name="line-14159"></a>  description: "The `dtype` of the `SparseTensor` objects stored in the\n`SparseTensorsMap`."
<a name="line-14160"></a>  name: "dtype"
<a name="line-14161"></a>  type: "type"
<a name="line-14162"></a>}
<a name="line-14163"></a>attr {
<a name="line-14164"></a>  default_value { s: "" }
<a name="line-14165"></a>  description: "The container name for the `SparseTensorsMap` read by this op."
<a name="line-14166"></a>  name: "container"
<a name="line-14167"></a>  type: "string"
<a name="line-14168"></a>}
<a name="line-14169"></a>attr {
<a name="line-14170"></a>  default_value { s: "" }
<a name="line-14171"></a>  description: "The shared name for the `SparseTensorsMap` read by this op.\nIt should not be blank; rather the `shared_name` or unique Operation name\nof the Op that created the original `SparseTensorsMap` should be used."
<a name="line-14172"></a>  name: "shared_name"
<a name="line-14173"></a>  type: "string"
<a name="line-14174"></a>}
<a name="line-14175"></a>input_arg {
<a name="line-14176"></a>  description: "1-D, The `N` serialized `SparseTensor` objects.\nShape: `[N]`."
<a name="line-14177"></a>  name: "sparse_handles"
<a name="line-14178"></a>  type: DT_INT64
<a name="line-14179"></a>}
<a name="line-14180"></a>output_arg {
<a name="line-14181"></a>  description: "2-D.  The `indices` of the minibatch `SparseTensor`."
<a name="line-14182"></a>  name: "sparse_indices"
<a name="line-14183"></a>  type: DT_INT64
<a name="line-14184"></a>}
<a name="line-14185"></a>output_arg {
<a name="line-14186"></a>  description: "1-D.  The `values` of the minibatch `SparseTensor`."
<a name="line-14187"></a>  name: "sparse_values"
<a name="line-14188"></a>  type_attr: "dtype"
<a name="line-14189"></a>}
<a name="line-14190"></a>output_arg {
<a name="line-14191"></a>  description: "1-D.  The `shape` of the minibatch `SparseTensor`."
<a name="line-14192"></a>  name: "sparse_shape"
<a name="line-14193"></a>  type: DT_INT64
<a name="line-14194"></a>}
<a name="line-14195"></a>-}</span>
<a name="line-14196"></a>
<a name="line-14197"></a><a name="destroyTemporaryVariable"></a><span class='hs-comment'>-- | Destroys the temporary variable and returns its final value.</span>
<a name="line-14198"></a><span class='hs-comment'>--</span>
<a name="line-14199"></a><span class='hs-comment'>-- Sets output to the value of the Tensor pointed to by 'ref', then destroys</span>
<a name="line-14200"></a><span class='hs-comment'>-- the temporary variable called 'var_name'.</span>
<a name="line-14201"></a><span class='hs-comment'>-- All other uses of 'ref' *must* have executed before this op.</span>
<a name="line-14202"></a><span class='hs-comment'>-- This is typically achieved by chaining the ref through each assign op, or by</span>
<a name="line-14203"></a><span class='hs-comment'>-- using control dependencies.</span>
<a name="line-14204"></a><span class='hs-comment'>-- </span>
<a name="line-14205"></a><span class='hs-comment'>-- Outputs the final value of the tensor pointed to by 'ref'.</span>
<a name="line-14206"></a><span class='hs-definition'>destroyTemporaryVariable</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14207"></a>                            <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: A reference to the temporary variable tensor.</span>
<a name="line-14208"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __value__</span>
<a name="line-14209"></a><span class='hs-definition'>destroyTemporaryVariable</span> <span class='hs-varid'>ref</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14210"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DestroyTemporaryVariable"</span>
<a name="line-14211"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14212"></a>        <span class='hs-varid'>ref</span>
<a name="line-14213"></a><span class='hs-comment'>{-
<a name="line-14214"></a>attr { name: "T" type: "type" }
<a name="line-14215"></a>attr {
<a name="line-14216"></a>  description: "Name of the temporary variable, usually the name of the matching\n\'TemporaryVariable\' op."
<a name="line-14217"></a>  name: "var_name"
<a name="line-14218"></a>  type: "string"
<a name="line-14219"></a>}
<a name="line-14220"></a>input_arg {
<a name="line-14221"></a>  description: "A reference to the temporary variable tensor."
<a name="line-14222"></a>  is_ref: true
<a name="line-14223"></a>  name: "ref"
<a name="line-14224"></a>  type_attr: "T"
<a name="line-14225"></a>}
<a name="line-14226"></a>output_arg { name: "value" type_attr: "T" }
<a name="line-14227"></a>-}</span>
<a name="line-14228"></a>
<a name="line-14229"></a><a name="assignSub"></a><span class='hs-comment'>-- | Update 'ref' by subtracting 'value' from it.</span>
<a name="line-14230"></a><span class='hs-comment'>--</span>
<a name="line-14231"></a><span class='hs-comment'>-- This operation outputs "ref" after the update is done.</span>
<a name="line-14232"></a><span class='hs-comment'>-- This makes it easier to chain operations that need to use the reset value.</span>
<a name="line-14233"></a><span class='hs-definition'>assignSub</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-14234"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-14235"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-14236"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-14237"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-14238"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-14239"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-14240"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14241"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: Should be from a `Variable` node.</span>
<a name="line-14242"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: The value to be subtracted to the variable.</span>
<a name="line-14243"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want</span>
<a name="line-14244"></a>             <span class='hs-comment'>-- to use the new value after the variable has been updated.</span>
<a name="line-14245"></a><span class='hs-definition'>assignSub</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14246"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AssignSub"</span>
<a name="line-14247"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14248"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>value</span>
<a name="line-14249"></a><span class='hs-comment'>{-
<a name="line-14250"></a>attr {
<a name="line-14251"></a>  allowed_values {
<a name="line-14252"></a>    list {
<a name="line-14253"></a>      type: DT_FLOAT
<a name="line-14254"></a>      type: DT_DOUBLE
<a name="line-14255"></a>      type: DT_INT64
<a name="line-14256"></a>      type: DT_INT32
<a name="line-14257"></a>      type: DT_UINT8
<a name="line-14258"></a>      type: DT_UINT16
<a name="line-14259"></a>      type: DT_INT16
<a name="line-14260"></a>      type: DT_INT8
<a name="line-14261"></a>      type: DT_COMPLEX64
<a name="line-14262"></a>      type: DT_COMPLEX128
<a name="line-14263"></a>      type: DT_QINT8
<a name="line-14264"></a>      type: DT_QUINT8
<a name="line-14265"></a>      type: DT_QINT32
<a name="line-14266"></a>      type: DT_HALF
<a name="line-14267"></a>    }
<a name="line-14268"></a>  }
<a name="line-14269"></a>  name: "T"
<a name="line-14270"></a>  type: "type"
<a name="line-14271"></a>}
<a name="line-14272"></a>attr {
<a name="line-14273"></a>  default_value { b: false }
<a name="line-14274"></a>  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-14275"></a>  name: "use_locking"
<a name="line-14276"></a>  type: "bool"
<a name="line-14277"></a>}
<a name="line-14278"></a>input_arg {
<a name="line-14279"></a>  description: "Should be from a `Variable` node."
<a name="line-14280"></a>  is_ref: true
<a name="line-14281"></a>  name: "ref"
<a name="line-14282"></a>  type_attr: "T"
<a name="line-14283"></a>}
<a name="line-14284"></a>input_arg {
<a name="line-14285"></a>  description: "The value to be subtracted to the variable."
<a name="line-14286"></a>  name: "value"
<a name="line-14287"></a>  type_attr: "T"
<a name="line-14288"></a>}
<a name="line-14289"></a>output_arg {
<a name="line-14290"></a>  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been updated."
<a name="line-14291"></a>  is_ref: true
<a name="line-14292"></a>  name: "output_ref"
<a name="line-14293"></a>  type_attr: "T"
<a name="line-14294"></a>}
<a name="line-14295"></a>-}</span>
<a name="line-14296"></a>
<a name="line-14297"></a><a name="encodeJpeg"></a><span class='hs-comment'>-- | JPEG-encode an image.</span>
<a name="line-14298"></a><span class='hs-comment'>--</span>
<a name="line-14299"></a><span class='hs-comment'>-- `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.</span>
<a name="line-14300"></a><span class='hs-comment'>-- </span>
<a name="line-14301"></a><span class='hs-comment'>-- The attr `format` can be used to override the color format of the encoded</span>
<a name="line-14302"></a><span class='hs-comment'>-- output.  Values can be:</span>
<a name="line-14303"></a><span class='hs-comment'>-- </span>
<a name="line-14304"></a><span class='hs-comment'>-- *   `''`: Use a default format based on the number of channels in the image.</span>
<a name="line-14305"></a><span class='hs-comment'>-- *   `grayscale`: Output a grayscale JPEG image.  The `channels` dimension</span>
<a name="line-14306"></a><span class='hs-comment'>--     of `image` must be 1.</span>
<a name="line-14307"></a><span class='hs-comment'>-- *   `rgb`: Output an RGB JPEG image. The `channels` dimension</span>
<a name="line-14308"></a><span class='hs-comment'>--     of `image` must be 3.</span>
<a name="line-14309"></a><span class='hs-comment'>-- </span>
<a name="line-14310"></a><span class='hs-comment'>-- If `format` is not specified or is the empty string, a default format is picked</span>
<a name="line-14311"></a><span class='hs-comment'>-- in function of the number of channels in `image`:</span>
<a name="line-14312"></a><span class='hs-comment'>-- </span>
<a name="line-14313"></a><span class='hs-comment'>-- *   1: Output a grayscale image.</span>
<a name="line-14314"></a><span class='hs-comment'>-- *   3: Output an RGB image.</span>
<a name="line-14315"></a><span class='hs-definition'>encodeJpeg</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span> <span class='hs-comment'>-- ^ __image__: 3-D with shape `[height, width, channels]`.</span>
<a name="line-14316"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __contents__: 0-D. JPEG-encoded image.</span>
<a name="line-14317"></a><span class='hs-definition'>encodeJpeg</span> <span class='hs-varid'>image</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14318"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"EncodeJpeg"</span><span class='hs-layout'>)</span>
<a name="line-14319"></a>        <span class='hs-varid'>image</span>
<a name="line-14320"></a><span class='hs-comment'>{-
<a name="line-14321"></a>attr {
<a name="line-14322"></a>  allowed_values { list { s: "" s: "grayscale" s: "rgb" } }
<a name="line-14323"></a>  default_value { s: "" }
<a name="line-14324"></a>  description: "Per pixel image format."
<a name="line-14325"></a>  name: "format"
<a name="line-14326"></a>  type: "string"
<a name="line-14327"></a>}
<a name="line-14328"></a>attr {
<a name="line-14329"></a>  default_value { i: 95 }
<a name="line-14330"></a>  description: "Quality of the compression from 0 to 100 (higher is better and slower)."
<a name="line-14331"></a>  name: "quality"
<a name="line-14332"></a>  type: "int"
<a name="line-14333"></a>}
<a name="line-14334"></a>attr {
<a name="line-14335"></a>  default_value { b: false }
<a name="line-14336"></a>  description: "If True, create a JPEG that loads progressively (coarse to fine)."
<a name="line-14337"></a>  name: "progressive"
<a name="line-14338"></a>  type: "bool"
<a name="line-14339"></a>}
<a name="line-14340"></a>attr {
<a name="line-14341"></a>  default_value { b: false }
<a name="line-14342"></a>  description: "If True, spend CPU/RAM to reduce size with no quality change."
<a name="line-14343"></a>  name: "optimize_size"
<a name="line-14344"></a>  type: "bool"
<a name="line-14345"></a>}
<a name="line-14346"></a>attr {
<a name="line-14347"></a>  default_value { b: true }
<a name="line-14348"></a>  description: "See <a href="http://en.wikipedia.org/wiki/Chroma_subsampling.">http://en.wikipedia.org/wiki/Chroma_subsampling.</a>"
<a name="line-14349"></a>  name: "chroma_downsampling"
<a name="line-14350"></a>  type: "bool"
<a name="line-14351"></a>}
<a name="line-14352"></a>attr {
<a name="line-14353"></a>  allowed_values { list { s: "in" s: "cm" } }
<a name="line-14354"></a>  default_value { s: "in" }
<a name="line-14355"></a>  description: "Unit used to specify `x_density` and `y_density`:\npixels per inch (`\'in\'`) or centimeter (`\'cm\'`)."
<a name="line-14356"></a>  name: "density_unit"
<a name="line-14357"></a>  type: "string"
<a name="line-14358"></a>}
<a name="line-14359"></a>attr {
<a name="line-14360"></a>  default_value { i: 300 }
<a name="line-14361"></a>  description: "Horizontal pixels per density unit."
<a name="line-14362"></a>  name: "x_density"
<a name="line-14363"></a>  type: "int"
<a name="line-14364"></a>}
<a name="line-14365"></a>attr {
<a name="line-14366"></a>  default_value { i: 300 }
<a name="line-14367"></a>  description: "Vertical pixels per density unit."
<a name="line-14368"></a>  name: "y_density"
<a name="line-14369"></a>  type: "int"
<a name="line-14370"></a>}
<a name="line-14371"></a>attr {
<a name="line-14372"></a>  default_value { s: "" }
<a name="line-14373"></a>  description: "If not empty, embed this XMP metadata in the image header."
<a name="line-14374"></a>  name: "xmp_metadata"
<a name="line-14375"></a>  type: "string"
<a name="line-14376"></a>}
<a name="line-14377"></a>input_arg {
<a name="line-14378"></a>  description: "3-D with shape `[height, width, channels]`."
<a name="line-14379"></a>  name: "image"
<a name="line-14380"></a>  type: DT_UINT8
<a name="line-14381"></a>}
<a name="line-14382"></a>output_arg {
<a name="line-14383"></a>  description: "0-D. JPEG-encoded image."
<a name="line-14384"></a>  name: "contents"
<a name="line-14385"></a>  type: DT_STRING
<a name="line-14386"></a>}
<a name="line-14387"></a>-}</span>
<a name="line-14388"></a>
<a name="line-14389"></a><a name="temporaryVariable"></a><span class='hs-comment'>-- | Returns a tensor that may be mutated, but only persists within a single step.</span>
<a name="line-14390"></a><span class='hs-comment'>--</span>
<a name="line-14391"></a><span class='hs-comment'>-- This is an experimental op for internal use only and it is possible to use this</span>
<a name="line-14392"></a><span class='hs-comment'>-- op in unsafe ways.  DO NOT USE unless you fully understand the risks.</span>
<a name="line-14393"></a><span class='hs-comment'>-- </span>
<a name="line-14394"></a><span class='hs-comment'>-- It is the caller's responsibility to ensure that 'ref' is eventually passed to a</span>
<a name="line-14395"></a><span class='hs-comment'>-- matching 'DestroyTemporaryVariable' op after all other uses have completed.</span>
<a name="line-14396"></a><span class='hs-comment'>-- </span>
<a name="line-14397"></a><span class='hs-comment'>-- Outputs a ref to the tensor state so it may be read or modified.</span>
<a name="line-14398"></a><span class='hs-comment'>-- </span>
<a name="line-14399"></a><span class='hs-comment'>--   E.g.</span>
<a name="line-14400"></a><span class='hs-comment'>--       var = state_ops._temporary_variable([1, 2], types.float_)</span>
<a name="line-14401"></a><span class='hs-comment'>--       var_name = var.op.name</span>
<a name="line-14402"></a><span class='hs-comment'>--       var = state_ops.assign(var, [[4.0, 5.0]])</span>
<a name="line-14403"></a><span class='hs-comment'>--       var = state_ops.assign_add(var, [[6.0, 7.0]])</span>
<a name="line-14404"></a><span class='hs-comment'>--       final = state_ops._destroy_temporary_variable(var, var_name=var_name)</span>
<a name="line-14405"></a><span class='hs-definition'>temporaryVariable</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14406"></a>                     <span class='hs-conid'>Shape</span> <span class='hs-comment'>-- ^ __shape__: The shape of the variable tensor.</span>
<a name="line-14407"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __ref__: A reference to the variable tensor.</span>
<a name="line-14408"></a><span class='hs-definition'>temporaryVariable</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14409"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TemporaryVariable"</span>
<a name="line-14410"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-14411"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"shape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>shape</span><span class='hs-layout'>)</span>
<a name="line-14412"></a>        
<a name="line-14413"></a><span class='hs-comment'>{-
<a name="line-14414"></a>attr {
<a name="line-14415"></a>  description: "The shape of the variable tensor."
<a name="line-14416"></a>  name: "shape"
<a name="line-14417"></a>  type: "shape"
<a name="line-14418"></a>}
<a name="line-14419"></a>attr {
<a name="line-14420"></a>  description: "The type of elements in the variable tensor."
<a name="line-14421"></a>  name: "dtype"
<a name="line-14422"></a>  type: "type"
<a name="line-14423"></a>}
<a name="line-14424"></a>attr {
<a name="line-14425"></a>  default_value { s: "" }
<a name="line-14426"></a>  description: "Overrides the name used for the temporary variable resource. Default\nvalue is the name of the \'TemporaryVariable\' op (which is guaranteed unique)."
<a name="line-14427"></a>  name: "var_name"
<a name="line-14428"></a>  type: "string"
<a name="line-14429"></a>}
<a name="line-14430"></a>output_arg {
<a name="line-14431"></a>  description: "A reference to the variable tensor."
<a name="line-14432"></a>  is_ref: true
<a name="line-14433"></a>  name: "ref"
<a name="line-14434"></a>  type_attr: "dtype"
<a name="line-14435"></a>}
<a name="line-14436"></a>-}</span>
<a name="line-14437"></a>
<a name="line-14438"></a><a name="isVariableInitialized"></a><span class='hs-comment'>-- | Checks whether a tensor has been initialized.</span>
<a name="line-14439"></a><span class='hs-comment'>--</span>
<a name="line-14440"></a><span class='hs-comment'>-- Outputs boolean scalar indicating whether the tensor has been initialized.</span>
<a name="line-14441"></a><span class='hs-definition'>isVariableInitialized</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14442"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __ref__: Should be from a `Variable` node. May be uninitialized.</span>
<a name="line-14443"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __is_initialized__</span>
<a name="line-14444"></a><span class='hs-definition'>isVariableInitialized</span> <span class='hs-varid'>ref</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14445"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"IsVariableInitialized"</span>
<a name="line-14446"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14447"></a>        <span class='hs-varid'>ref</span>
<a name="line-14448"></a><span class='hs-comment'>{-
<a name="line-14449"></a>attr {
<a name="line-14450"></a>  description: "The type of elements in the variable tensor."
<a name="line-14451"></a>  name: "dtype"
<a name="line-14452"></a>  type: "type"
<a name="line-14453"></a>}
<a name="line-14454"></a>input_arg {
<a name="line-14455"></a>  description: "Should be from a `Variable` node. May be uninitialized."
<a name="line-14456"></a>  is_ref: true
<a name="line-14457"></a>  name: "ref"
<a name="line-14458"></a>  type_attr: "dtype"
<a name="line-14459"></a>}
<a name="line-14460"></a>output_arg { name: "is_initialized" type: DT_BOOL }
<a name="line-14461"></a>-}</span>
<a name="line-14462"></a>
<a name="line-14463"></a><a name="variable"></a><span class='hs-comment'>-- | Holds state in the form of a tensor that persists across steps.</span>
<a name="line-14464"></a><span class='hs-comment'>--</span>
<a name="line-14465"></a><span class='hs-comment'>-- Outputs a ref to the tensor state so it may be read or modified.</span>
<a name="line-14466"></a><span class='hs-comment'>-- TODO(zhifengc/mrry): Adds a pointer to a more detail document</span>
<a name="line-14467"></a><span class='hs-comment'>-- about sharing states in tensorflow.</span>
<a name="line-14468"></a><span class='hs-definition'>variable</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14469"></a>            <span class='hs-conid'>Shape</span> <span class='hs-comment'>-- ^ __shape__: The shape of the variable tensor.</span>
<a name="line-14470"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __ref__: A reference to the variable tensor.</span>
<a name="line-14471"></a><span class='hs-definition'>variable</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14472"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Variable"</span>
<a name="line-14473"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-14474"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"shape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>shape</span><span class='hs-layout'>)</span>
<a name="line-14475"></a>        
<a name="line-14476"></a><span class='hs-comment'>{-
<a name="line-14477"></a>attr {
<a name="line-14478"></a>  description: "The shape of the variable tensor."
<a name="line-14479"></a>  name: "shape"
<a name="line-14480"></a>  type: "shape"
<a name="line-14481"></a>}
<a name="line-14482"></a>attr {
<a name="line-14483"></a>  description: "The type of elements in the variable tensor."
<a name="line-14484"></a>  name: "dtype"
<a name="line-14485"></a>  type: "type"
<a name="line-14486"></a>}
<a name="line-14487"></a>attr {
<a name="line-14488"></a>  default_value { s: "" }
<a name="line-14489"></a>  description: "If non-empty, this variable is placed in the given container.\nOtherwise, a default container is used."
<a name="line-14490"></a>  name: "container"
<a name="line-14491"></a>  type: "string"
<a name="line-14492"></a>}
<a name="line-14493"></a>attr {
<a name="line-14494"></a>  default_value { s: "" }
<a name="line-14495"></a>  description: "If non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
<a name="line-14496"></a>  name: "shared_name"
<a name="line-14497"></a>  type: "string"
<a name="line-14498"></a>}
<a name="line-14499"></a>output_arg {
<a name="line-14500"></a>  description: "A reference to the variable tensor."
<a name="line-14501"></a>  is_ref: true
<a name="line-14502"></a>  name: "ref"
<a name="line-14503"></a>  type_attr: "dtype"
<a name="line-14504"></a>}
<a name="line-14505"></a>-}</span>
<a name="line-14506"></a>
<a name="line-14507"></a><a name="sparseSparseMinimum"></a><span class='hs-comment'>-- | Returns the element-wise min of two SparseTensors.</span>
<a name="line-14508"></a><span class='hs-comment'>--</span>
<a name="line-14509"></a><span class='hs-comment'>-- Assumes the two SparseTensors have the same shape, i.e., no broadcasting.</span>
<a name="line-14510"></a><span class='hs-definition'>sparseSparseMinimum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-14511"></a>                                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-14512"></a>                                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-14513"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-14514"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-14515"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-14516"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-14517"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-14518"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-14519"></a>                                                             <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-14520"></a>                                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14521"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __a_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a</span>
<a name="line-14522"></a>                                                <span class='hs-comment'>-- SparseTensor, in the canonical lexicographic ordering.</span>
<a name="line-14523"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a_values__: 1-D.  `N` non-empty values corresponding to `a_indices`.</span>
<a name="line-14524"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __a_shape__: 1-D.  Shape of the input SparseTensor.</span>
<a name="line-14525"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __b_indices__: counterpart to `a_indices` for the other operand.</span>
<a name="line-14526"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype.</span>
<a name="line-14527"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal.</span>
<a name="line-14528"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-14529"></a>                       <span class='hs-comment'>-- ^ (__output_indices__, __output_values__)</span>
<a name="line-14530"></a>                       <span class='hs-comment'>--</span>
<a name="line-14531"></a>                       <span class='hs-comment'>-- * __output_indices__: 2-D.  The indices of the output SparseTensor.</span>
<a name="line-14532"></a>                       <span class='hs-comment'>--</span>
<a name="line-14533"></a>                       <span class='hs-comment'>-- * __output_values__: 1-D.  The values of the output SparseTensor.</span>
<a name="line-14534"></a><span class='hs-definition'>sparseSparseMinimum</span> <span class='hs-varid'>a_indices</span> <span class='hs-varid'>a_values</span> <span class='hs-varid'>a_shape</span> <span class='hs-varid'>b_indices</span> <span class='hs-varid'>b_values</span>
<a name="line-14535"></a>                    <span class='hs-varid'>b_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14536"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseSparseMinimum"</span>
<a name="line-14537"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14538"></a>        <span class='hs-varid'>a_indices</span> <span class='hs-varid'>a_values</span> <span class='hs-varid'>a_shape</span> <span class='hs-varid'>b_indices</span> <span class='hs-varid'>b_values</span> <span class='hs-varid'>b_shape</span>
<a name="line-14539"></a><span class='hs-comment'>{-
<a name="line-14540"></a>attr {
<a name="line-14541"></a>  allowed_values {
<a name="line-14542"></a>    list {
<a name="line-14543"></a>      type: DT_FLOAT
<a name="line-14544"></a>      type: DT_DOUBLE
<a name="line-14545"></a>      type: DT_INT64
<a name="line-14546"></a>      type: DT_INT32
<a name="line-14547"></a>      type: DT_UINT8
<a name="line-14548"></a>      type: DT_UINT16
<a name="line-14549"></a>      type: DT_INT16
<a name="line-14550"></a>      type: DT_INT8
<a name="line-14551"></a>      type: DT_COMPLEX64
<a name="line-14552"></a>      type: DT_COMPLEX128
<a name="line-14553"></a>      type: DT_QINT8
<a name="line-14554"></a>      type: DT_QUINT8
<a name="line-14555"></a>      type: DT_QINT32
<a name="line-14556"></a>      type: DT_HALF
<a name="line-14557"></a>    }
<a name="line-14558"></a>  }
<a name="line-14559"></a>  name: "T"
<a name="line-14560"></a>  type: "type"
<a name="line-14561"></a>}
<a name="line-14562"></a>input_arg {
<a name="line-14563"></a>  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering."
<a name="line-14564"></a>  name: "a_indices"
<a name="line-14565"></a>  type: DT_INT64
<a name="line-14566"></a>}
<a name="line-14567"></a>input_arg {
<a name="line-14568"></a>  description: "1-D.  `N` non-empty values corresponding to `a_indices`."
<a name="line-14569"></a>  name: "a_values"
<a name="line-14570"></a>  type_attr: "T"
<a name="line-14571"></a>}
<a name="line-14572"></a>input_arg {
<a name="line-14573"></a>  description: "1-D.  Shape of the input SparseTensor."
<a name="line-14574"></a>  name: "a_shape"
<a name="line-14575"></a>  type: DT_INT64
<a name="line-14576"></a>}
<a name="line-14577"></a>input_arg {
<a name="line-14578"></a>  description: "counterpart to `a_indices` for the other operand."
<a name="line-14579"></a>  name: "b_indices"
<a name="line-14580"></a>  type: DT_INT64
<a name="line-14581"></a>}
<a name="line-14582"></a>input_arg {
<a name="line-14583"></a>  description: "counterpart to `a_values` for the other operand; must be of the same dtype."
<a name="line-14584"></a>  name: "b_values"
<a name="line-14585"></a>  type_attr: "T"
<a name="line-14586"></a>}
<a name="line-14587"></a>input_arg {
<a name="line-14588"></a>  description: "counterpart to `a_shape` for the other operand; the two shapes must be equal."
<a name="line-14589"></a>  name: "b_shape"
<a name="line-14590"></a>  type: DT_INT64
<a name="line-14591"></a>}
<a name="line-14592"></a>output_arg {
<a name="line-14593"></a>  description: "2-D.  The indices of the output SparseTensor."
<a name="line-14594"></a>  name: "output_indices"
<a name="line-14595"></a>  type: DT_INT64
<a name="line-14596"></a>}
<a name="line-14597"></a>output_arg {
<a name="line-14598"></a>  description: "1-D.  The values of the output SparseTensor."
<a name="line-14599"></a>  name: "output_values"
<a name="line-14600"></a>  type_attr: "T"
<a name="line-14601"></a>}
<a name="line-14602"></a>-}</span>
<a name="line-14603"></a>
<a name="line-14604"></a><a name="betainc"></a><span class='hs-comment'>-- | Compute the regularized incomplete beta integral \\(I_x(a, b)\\).</span>
<a name="line-14605"></a><span class='hs-comment'>--</span>
<a name="line-14606"></a><span class='hs-comment'>-- The regularized incomplete beta integral is defined as:</span>
<a name="line-14607"></a><span class='hs-comment'>-- </span>
<a name="line-14608"></a><span class='hs-comment'>-- ```</span>
<a name="line-14609"></a><span class='hs-comment'>-- I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}</span>
<a name="line-14610"></a><span class='hs-comment'>-- ```</span>
<a name="line-14611"></a><span class='hs-comment'>-- where</span>
<a name="line-14612"></a><span class='hs-comment'>-- </span>
<a name="line-14613"></a><span class='hs-comment'>-- ```</span>
<a name="line-14614"></a><span class='hs-comment'>-- B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt</span>
<a name="line-14615"></a><span class='hs-comment'>-- ```</span>
<a name="line-14616"></a><span class='hs-comment'>-- </span>
<a name="line-14617"></a><span class='hs-comment'>-- is the incomplete beta function and \\(B(a, b)\\) is the *complete*</span>
<a name="line-14618"></a><span class='hs-comment'>-- beta function.</span>
<a name="line-14619"></a><span class='hs-definition'>betainc</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14620"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a__</span>
<a name="line-14621"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __b__</span>
<a name="line-14622"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-14623"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-14624"></a><span class='hs-definition'>betainc</span> <span class='hs-varid'>a</span> <span class='hs-varid'>b</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14625"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Betainc"</span>
<a name="line-14626"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14627"></a>        <span class='hs-varid'>a</span> <span class='hs-varid'>b</span> <span class='hs-varid'>x</span>
<a name="line-14628"></a><span class='hs-comment'>{-
<a name="line-14629"></a>attr {
<a name="line-14630"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-14631"></a>  name: "T"
<a name="line-14632"></a>  type: "type"
<a name="line-14633"></a>}
<a name="line-14634"></a>input_arg { name: "a" type_attr: "T" }
<a name="line-14635"></a>input_arg { name: "b" type_attr: "T" }
<a name="line-14636"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-14637"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-14638"></a>-}</span>
<a name="line-14639"></a>
<a name="line-14640"></a><a name="assign"></a><span class='hs-comment'>-- | Update 'ref' by assigning 'value' to it.</span>
<a name="line-14641"></a><span class='hs-comment'>--</span>
<a name="line-14642"></a><span class='hs-comment'>-- This operation outputs "ref" after the assignment is done.</span>
<a name="line-14643"></a><span class='hs-comment'>-- This makes it easier to chain operations that need to use the reset value.</span>
<a name="line-14644"></a><span class='hs-definition'>assign</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14645"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: Should be from a `Variable` node. May be uninitialized.</span>
<a name="line-14646"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: The value to be assigned to the variable.</span>
<a name="line-14647"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want</span>
<a name="line-14648"></a>          <span class='hs-comment'>-- to use the new value after the variable has been reset.</span>
<a name="line-14649"></a><span class='hs-definition'>assign</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14650"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Assign"</span>
<a name="line-14651"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14652"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>value</span>
<a name="line-14653"></a><span class='hs-comment'>{-
<a name="line-14654"></a>attr { name: "T" type: "type" }
<a name="line-14655"></a>attr {
<a name="line-14656"></a>  default_value { b: true }
<a name="line-14657"></a>  description: "If true, the operation will validate that the shape\nof \'value\' matches the shape of the Tensor being assigned to.  If false,\n\'ref\' will take on the shape of \'value\'."
<a name="line-14658"></a>  name: "validate_shape"
<a name="line-14659"></a>  type: "bool"
<a name="line-14660"></a>}
<a name="line-14661"></a>attr {
<a name="line-14662"></a>  default_value { b: true }
<a name="line-14663"></a>  description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-14664"></a>  name: "use_locking"
<a name="line-14665"></a>  type: "bool"
<a name="line-14666"></a>}
<a name="line-14667"></a>input_arg {
<a name="line-14668"></a>  description: "Should be from a `Variable` node. May be uninitialized."
<a name="line-14669"></a>  is_ref: true
<a name="line-14670"></a>  name: "ref"
<a name="line-14671"></a>  type_attr: "T"
<a name="line-14672"></a>}
<a name="line-14673"></a>input_arg {
<a name="line-14674"></a>  description: "The value to be assigned to the variable."
<a name="line-14675"></a>  name: "value"
<a name="line-14676"></a>  type_attr: "T"
<a name="line-14677"></a>}
<a name="line-14678"></a>output_arg {
<a name="line-14679"></a>  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been reset."
<a name="line-14680"></a>  is_ref: true
<a name="line-14681"></a>  name: "output_ref"
<a name="line-14682"></a>  type_attr: "T"
<a name="line-14683"></a>}
<a name="line-14684"></a>-}</span>
<a name="line-14685"></a>
<a name="line-14686"></a><a name="sparseSoftmax"></a><span class='hs-comment'>-- | Applies softmax to a batched N-D `SparseTensor`.</span>
<a name="line-14687"></a><span class='hs-comment'>--</span>
<a name="line-14688"></a><span class='hs-comment'>-- The inputs represent an N-D SparseTensor  with logical shape `[..., B, C]`</span>
<a name="line-14689"></a><span class='hs-comment'>-- (where `N &gt;= 2`), and with indices sorted in the canonical lexicographic order.</span>
<a name="line-14690"></a><span class='hs-comment'>-- </span>
<a name="line-14691"></a><span class='hs-comment'>-- This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost</span>
<a name="line-14692"></a><span class='hs-comment'>-- logical submatrix with shape `[B, C]`, but with the catch that *the implicitly</span>
<a name="line-14693"></a><span class='hs-comment'>-- zero elements do not participate*.  Specifically, the algorithm is equivalent</span>
<a name="line-14694"></a><span class='hs-comment'>-- to the following:</span>
<a name="line-14695"></a><span class='hs-comment'>-- </span>
<a name="line-14696"></a><span class='hs-comment'>--   (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix</span>
<a name="line-14697"></a><span class='hs-comment'>--       with shape `[B, C]`, along the size-C dimension;</span>
<a name="line-14698"></a><span class='hs-comment'>--   (2) Masks out the original implicitly-zero locations;</span>
<a name="line-14699"></a><span class='hs-comment'>--   (3) Renormalizes the remaining elements.</span>
<a name="line-14700"></a><span class='hs-comment'>-- </span>
<a name="line-14701"></a><span class='hs-comment'>-- Hence, the `SparseTensor` result has exactly the same non-zero indices and</span>
<a name="line-14702"></a><span class='hs-comment'>-- shape.</span>
<a name="line-14703"></a><span class='hs-definition'>sparseSoftmax</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14704"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sp_indices__: 2-D.  `NNZ x R` matrix with the indices of non-empty values in a</span>
<a name="line-14705"></a>                                          <span class='hs-comment'>-- SparseTensor, in canonical ordering.</span>
<a name="line-14706"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __sp_values__: 1-D.  `NNZ` non-empty values corresponding to `sp_indices`.</span>
<a name="line-14707"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.</span>
<a name="line-14708"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 1-D.  The `NNZ` values for the result `SparseTensor`.</span>
<a name="line-14709"></a><span class='hs-definition'>sparseSoftmax</span> <span class='hs-varid'>sp_indices</span> <span class='hs-varid'>sp_values</span> <span class='hs-varid'>sp_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14710"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseSoftmax"</span>
<a name="line-14711"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14712"></a>        <span class='hs-varid'>sp_indices</span> <span class='hs-varid'>sp_values</span> <span class='hs-varid'>sp_shape</span>
<a name="line-14713"></a><span class='hs-comment'>{-
<a name="line-14714"></a>attr {
<a name="line-14715"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-14716"></a>  name: "T"
<a name="line-14717"></a>  type: "type"
<a name="line-14718"></a>}
<a name="line-14719"></a>input_arg {
<a name="line-14720"></a>  description: "2-D.  `NNZ x R` matrix with the indices of non-empty values in a\nSparseTensor, in canonical ordering."
<a name="line-14721"></a>  name: "sp_indices"
<a name="line-14722"></a>  type: DT_INT64
<a name="line-14723"></a>}
<a name="line-14724"></a>input_arg {
<a name="line-14725"></a>  description: "1-D.  `NNZ` non-empty values corresponding to `sp_indices`."
<a name="line-14726"></a>  name: "sp_values"
<a name="line-14727"></a>  type_attr: "T"
<a name="line-14728"></a>}
<a name="line-14729"></a>input_arg {
<a name="line-14730"></a>  description: "1-D.  Shape of the input SparseTensor."
<a name="line-14731"></a>  name: "sp_shape"
<a name="line-14732"></a>  type: DT_INT64
<a name="line-14733"></a>}
<a name="line-14734"></a>output_arg {
<a name="line-14735"></a>  description: "1-D.  The `NNZ` values for the result `SparseTensor`."
<a name="line-14736"></a>  name: "output"
<a name="line-14737"></a>  type_attr: "T"
<a name="line-14738"></a>}
<a name="line-14739"></a>-}</span>
<a name="line-14740"></a>
<a name="line-14741"></a><a name="sparseDenseCwiseAdd"></a><span class='hs-comment'>-- | Adds up a SparseTensor and a dense Tensor, using these special rules:</span>
<a name="line-14742"></a><span class='hs-comment'>--</span>
<a name="line-14743"></a><span class='hs-comment'>-- (1) Broadcasts the dense side to have the same shape as the sparse side, if</span>
<a name="line-14744"></a><span class='hs-comment'>--     eligible;</span>
<a name="line-14745"></a><span class='hs-comment'>-- (2) Then, only the dense values pointed to by the indices of the SparseTensor</span>
<a name="line-14746"></a><span class='hs-comment'>--     participate in the cwise addition.</span>
<a name="line-14747"></a><span class='hs-comment'>-- </span>
<a name="line-14748"></a><span class='hs-comment'>-- By these rules, the result is a logical SparseTensor with exactly the same</span>
<a name="line-14749"></a><span class='hs-comment'>-- indices and shape, but possibly with different non-zero values.  The output of</span>
<a name="line-14750"></a><span class='hs-comment'>-- this Op is the resultant non-zero values.</span>
<a name="line-14751"></a><span class='hs-definition'>sparseDenseCwiseAdd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-14752"></a>                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-14753"></a>                                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-14754"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-14755"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-14756"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-14757"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-14758"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-14759"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-14760"></a>                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14761"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a</span>
<a name="line-14762"></a>                                                <span class='hs-comment'>-- SparseTensor, possibly not in canonical ordering.</span>
<a name="line-14763"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.</span>
<a name="line-14764"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.</span>
<a name="line-14765"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __dense__: `R`-D.  The dense Tensor operand.</span>
<a name="line-14766"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 1-D.  The `N` values that are operated on.</span>
<a name="line-14767"></a><span class='hs-definition'>sparseDenseCwiseAdd</span> <span class='hs-varid'>sp_indices</span> <span class='hs-varid'>sp_values</span> <span class='hs-varid'>sp_shape</span> <span class='hs-varid'>dense</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14768"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseDenseCwiseAdd"</span>
<a name="line-14769"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14770"></a>        <span class='hs-varid'>sp_indices</span> <span class='hs-varid'>sp_values</span> <span class='hs-varid'>sp_shape</span> <span class='hs-varid'>dense</span>
<a name="line-14771"></a><span class='hs-comment'>{-
<a name="line-14772"></a>attr {
<a name="line-14773"></a>  allowed_values {
<a name="line-14774"></a>    list {
<a name="line-14775"></a>      type: DT_FLOAT
<a name="line-14776"></a>      type: DT_DOUBLE
<a name="line-14777"></a>      type: DT_INT64
<a name="line-14778"></a>      type: DT_INT32
<a name="line-14779"></a>      type: DT_UINT8
<a name="line-14780"></a>      type: DT_UINT16
<a name="line-14781"></a>      type: DT_INT16
<a name="line-14782"></a>      type: DT_INT8
<a name="line-14783"></a>      type: DT_COMPLEX64
<a name="line-14784"></a>      type: DT_COMPLEX128
<a name="line-14785"></a>      type: DT_QINT8
<a name="line-14786"></a>      type: DT_QUINT8
<a name="line-14787"></a>      type: DT_QINT32
<a name="line-14788"></a>      type: DT_HALF
<a name="line-14789"></a>    }
<a name="line-14790"></a>  }
<a name="line-14791"></a>  name: "T"
<a name="line-14792"></a>  type: "type"
<a name="line-14793"></a>}
<a name="line-14794"></a>input_arg {
<a name="line-14795"></a>  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
<a name="line-14796"></a>  name: "sp_indices"
<a name="line-14797"></a>  type: DT_INT64
<a name="line-14798"></a>}
<a name="line-14799"></a>input_arg {
<a name="line-14800"></a>  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
<a name="line-14801"></a>  name: "sp_values"
<a name="line-14802"></a>  type_attr: "T"
<a name="line-14803"></a>}
<a name="line-14804"></a>input_arg {
<a name="line-14805"></a>  description: "1-D.  Shape of the input SparseTensor."
<a name="line-14806"></a>  name: "sp_shape"
<a name="line-14807"></a>  type: DT_INT64
<a name="line-14808"></a>}
<a name="line-14809"></a>input_arg {
<a name="line-14810"></a>  description: "`R`-D.  The dense Tensor operand."
<a name="line-14811"></a>  name: "dense"
<a name="line-14812"></a>  type_attr: "T"
<a name="line-14813"></a>}
<a name="line-14814"></a>output_arg {
<a name="line-14815"></a>  description: "1-D.  The `N` values that are operated on."
<a name="line-14816"></a>  name: "output"
<a name="line-14817"></a>  type_attr: "T"
<a name="line-14818"></a>}
<a name="line-14819"></a>-}</span>
<a name="line-14820"></a>
<a name="line-14821"></a><span class='hs-comment'>-- | Returns the truth value of NOT x element-wise.</span>
<a name="line-14822"></a>
<a name="line-14823"></a><a name="logicalNot"></a><span class='hs-definition'>logicalNot</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-14824"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-14825"></a><span class='hs-definition'>logicalNot</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14826"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LogicalNot"</span><span class='hs-layout'>)</span>
<a name="line-14827"></a>        <span class='hs-varid'>x</span>
<a name="line-14828"></a><span class='hs-comment'>{-
<a name="line-14829"></a>input_arg { name: "x" type: DT_BOOL }
<a name="line-14830"></a>output_arg { name: "y" type: DT_BOOL }
<a name="line-14831"></a>-}</span>
<a name="line-14832"></a>
<a name="line-14833"></a><span class='hs-comment'>-- | Computes the number of elements in the given queue.</span>
<a name="line-14834"></a>
<a name="line-14835"></a><a name="queueSize"></a><span class='hs-definition'>queueSize</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a queue.</span>
<a name="line-14836"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __size__: The number of elements in the given queue.</span>
<a name="line-14837"></a><span class='hs-definition'>queueSize</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14838"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QueueSize"</span><span class='hs-layout'>)</span>
<a name="line-14839"></a>        <span class='hs-varid'>handle</span>
<a name="line-14840"></a><span class='hs-comment'>{-
<a name="line-14841"></a>input_arg {
<a name="line-14842"></a>  description: "The handle to a queue."
<a name="line-14843"></a>  is_ref: true
<a name="line-14844"></a>  name: "handle"
<a name="line-14845"></a>  type: DT_STRING
<a name="line-14846"></a>}
<a name="line-14847"></a>output_arg {
<a name="line-14848"></a>  description: "The number of elements in the given queue."
<a name="line-14849"></a>  name: "size"
<a name="line-14850"></a>  type: DT_INT32
<a name="line-14851"></a>}
<a name="line-14852"></a>-}</span>
<a name="line-14853"></a>
<a name="line-14854"></a><a name="sparseApplyAdagrad"></a><span class='hs-comment'>-- | Update relevant entries in '*var' and '*accum' according to the adagrad scheme.</span>
<a name="line-14855"></a><span class='hs-comment'>--</span>
<a name="line-14856"></a><span class='hs-comment'>-- That is for rows we have grad for, we update var and accum as follows:</span>
<a name="line-14857"></a><span class='hs-comment'>-- accum += grad * grad</span>
<a name="line-14858"></a><span class='hs-comment'>-- var -= lr * grad * (1 / sqrt(accum))</span>
<a name="line-14859"></a><span class='hs-definition'>sparseApplyAdagrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-14860"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-14861"></a>                                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-14862"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-14863"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-14864"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-14865"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-14866"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-14867"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-14868"></a>                                                            <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-14869"></a>                                                    <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-14870"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-14871"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14872"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-14873"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum__: Should be from a Variable().</span>
<a name="line-14874"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Learning rate. Must be a scalar.</span>
<a name="line-14875"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-14876"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A vector of indices into the first dimension of var and accum.</span>
<a name="line-14877"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-14878"></a><span class='hs-definition'>sparseApplyAdagrad</span> <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14879"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseApplyAdagrad"</span>
<a name="line-14880"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-14881"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14882"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span>
<a name="line-14883"></a><span class='hs-comment'>{-
<a name="line-14884"></a>attr {
<a name="line-14885"></a>  allowed_values {
<a name="line-14886"></a>    list {
<a name="line-14887"></a>      type: DT_FLOAT
<a name="line-14888"></a>      type: DT_DOUBLE
<a name="line-14889"></a>      type: DT_INT64
<a name="line-14890"></a>      type: DT_INT32
<a name="line-14891"></a>      type: DT_UINT8
<a name="line-14892"></a>      type: DT_UINT16
<a name="line-14893"></a>      type: DT_INT16
<a name="line-14894"></a>      type: DT_INT8
<a name="line-14895"></a>      type: DT_COMPLEX64
<a name="line-14896"></a>      type: DT_COMPLEX128
<a name="line-14897"></a>      type: DT_QINT8
<a name="line-14898"></a>      type: DT_QUINT8
<a name="line-14899"></a>      type: DT_QINT32
<a name="line-14900"></a>      type: DT_HALF
<a name="line-14901"></a>    }
<a name="line-14902"></a>  }
<a name="line-14903"></a>  name: "T"
<a name="line-14904"></a>  type: "type"
<a name="line-14905"></a>}
<a name="line-14906"></a>attr {
<a name="line-14907"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-14908"></a>  name: "Tindices"
<a name="line-14909"></a>  type: "type"
<a name="line-14910"></a>}
<a name="line-14911"></a>attr {
<a name="line-14912"></a>  default_value { b: false }
<a name="line-14913"></a>  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-14914"></a>  name: "use_locking"
<a name="line-14915"></a>  type: "bool"
<a name="line-14916"></a>}
<a name="line-14917"></a>input_arg {
<a name="line-14918"></a>  description: "Should be from a Variable()."
<a name="line-14919"></a>  is_ref: true
<a name="line-14920"></a>  name: "var"
<a name="line-14921"></a>  type_attr: "T"
<a name="line-14922"></a>}
<a name="line-14923"></a>input_arg {
<a name="line-14924"></a>  description: "Should be from a Variable()."
<a name="line-14925"></a>  is_ref: true
<a name="line-14926"></a>  name: "accum"
<a name="line-14927"></a>  type_attr: "T"
<a name="line-14928"></a>}
<a name="line-14929"></a>input_arg {
<a name="line-14930"></a>  description: "Learning rate. Must be a scalar."
<a name="line-14931"></a>  name: "lr"
<a name="line-14932"></a>  type_attr: "T"
<a name="line-14933"></a>}
<a name="line-14934"></a>input_arg {
<a name="line-14935"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-14936"></a>}
<a name="line-14937"></a>input_arg {
<a name="line-14938"></a>  description: "A vector of indices into the first dimension of var and accum."
<a name="line-14939"></a>  name: "indices"
<a name="line-14940"></a>  type_attr: "Tindices"
<a name="line-14941"></a>}
<a name="line-14942"></a>output_arg {
<a name="line-14943"></a>  description: "Same as \"var\"."
<a name="line-14944"></a>  is_ref: true
<a name="line-14945"></a>  name: "out"
<a name="line-14946"></a>  type_attr: "T"
<a name="line-14947"></a>}
<a name="line-14948"></a>-}</span>
<a name="line-14949"></a>
<a name="line-14950"></a><span class='hs-comment'>-- | Store the input tensor in the state of the current session.</span>
<a name="line-14951"></a>
<a name="line-14952"></a><a name="getSessionHandle"></a><span class='hs-definition'>getSessionHandle</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14953"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: The tensor to be stored.</span>
<a name="line-14954"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle for the tensor stored in the session state.</span>
<a name="line-14955"></a><span class='hs-definition'>getSessionHandle</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14956"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"GetSessionHandle"</span>
<a name="line-14957"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-14958"></a>        <span class='hs-varid'>value</span>
<a name="line-14959"></a><span class='hs-comment'>{-
<a name="line-14960"></a>attr { name: "T" type: "type" }
<a name="line-14961"></a>input_arg {
<a name="line-14962"></a>  description: "The tensor to be stored."
<a name="line-14963"></a>  name: "value"
<a name="line-14964"></a>  type_attr: "T"
<a name="line-14965"></a>}
<a name="line-14966"></a>output_arg {
<a name="line-14967"></a>  description: "The handle for the tensor stored in the session state."
<a name="line-14968"></a>  name: "handle"
<a name="line-14969"></a>  type: DT_STRING
<a name="line-14970"></a>}
<a name="line-14971"></a>-}</span>
<a name="line-14972"></a>
<a name="line-14973"></a><a name="sparseDenseCwiseMul"></a><span class='hs-comment'>-- | Component-wise multiplies a SparseTensor by a dense Tensor.</span>
<a name="line-14974"></a><span class='hs-comment'>--</span>
<a name="line-14975"></a><span class='hs-comment'>-- The output locations corresponding to the implicitly zero elements in the sparse</span>
<a name="line-14976"></a><span class='hs-comment'>-- tensor will be zero (i.e., will not take up storage space), regardless of the</span>
<a name="line-14977"></a><span class='hs-comment'>-- contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN).</span>
<a name="line-14978"></a><span class='hs-comment'>-- </span>
<a name="line-14979"></a><span class='hs-comment'>-- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not</span>
<a name="line-14980"></a><span class='hs-comment'>-- the other direction.</span>
<a name="line-14981"></a><span class='hs-definition'>sparseDenseCwiseMul</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-14982"></a>                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-14983"></a>                                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-14984"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-14985"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-14986"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-14987"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-14988"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-14989"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-14990"></a>                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-14991"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a</span>
<a name="line-14992"></a>                                                <span class='hs-comment'>-- SparseTensor, possibly not in canonical ordering.</span>
<a name="line-14993"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.</span>
<a name="line-14994"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.</span>
<a name="line-14995"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __dense__: `R`-D.  The dense Tensor operand.</span>
<a name="line-14996"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 1-D.  The `N` values that are operated on.</span>
<a name="line-14997"></a><span class='hs-definition'>sparseDenseCwiseMul</span> <span class='hs-varid'>sp_indices</span> <span class='hs-varid'>sp_values</span> <span class='hs-varid'>sp_shape</span> <span class='hs-varid'>dense</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-14998"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseDenseCwiseMul"</span>
<a name="line-14999"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15000"></a>        <span class='hs-varid'>sp_indices</span> <span class='hs-varid'>sp_values</span> <span class='hs-varid'>sp_shape</span> <span class='hs-varid'>dense</span>
<a name="line-15001"></a><span class='hs-comment'>{-
<a name="line-15002"></a>attr {
<a name="line-15003"></a>  allowed_values {
<a name="line-15004"></a>    list {
<a name="line-15005"></a>      type: DT_FLOAT
<a name="line-15006"></a>      type: DT_DOUBLE
<a name="line-15007"></a>      type: DT_INT64
<a name="line-15008"></a>      type: DT_INT32
<a name="line-15009"></a>      type: DT_UINT8
<a name="line-15010"></a>      type: DT_UINT16
<a name="line-15011"></a>      type: DT_INT16
<a name="line-15012"></a>      type: DT_INT8
<a name="line-15013"></a>      type: DT_COMPLEX64
<a name="line-15014"></a>      type: DT_COMPLEX128
<a name="line-15015"></a>      type: DT_QINT8
<a name="line-15016"></a>      type: DT_QUINT8
<a name="line-15017"></a>      type: DT_QINT32
<a name="line-15018"></a>      type: DT_HALF
<a name="line-15019"></a>    }
<a name="line-15020"></a>  }
<a name="line-15021"></a>  name: "T"
<a name="line-15022"></a>  type: "type"
<a name="line-15023"></a>}
<a name="line-15024"></a>input_arg {
<a name="line-15025"></a>  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
<a name="line-15026"></a>  name: "sp_indices"
<a name="line-15027"></a>  type: DT_INT64
<a name="line-15028"></a>}
<a name="line-15029"></a>input_arg {
<a name="line-15030"></a>  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
<a name="line-15031"></a>  name: "sp_values"
<a name="line-15032"></a>  type_attr: "T"
<a name="line-15033"></a>}
<a name="line-15034"></a>input_arg {
<a name="line-15035"></a>  description: "1-D.  Shape of the input SparseTensor."
<a name="line-15036"></a>  name: "sp_shape"
<a name="line-15037"></a>  type: DT_INT64
<a name="line-15038"></a>}
<a name="line-15039"></a>input_arg {
<a name="line-15040"></a>  description: "`R`-D.  The dense Tensor operand."
<a name="line-15041"></a>  name: "dense"
<a name="line-15042"></a>  type_attr: "T"
<a name="line-15043"></a>}
<a name="line-15044"></a>output_arg {
<a name="line-15045"></a>  description: "1-D.  The `N` values that are operated on."
<a name="line-15046"></a>  name: "output"
<a name="line-15047"></a>  type_attr: "T"
<a name="line-15048"></a>}
<a name="line-15049"></a>-}</span>
<a name="line-15050"></a>
<a name="line-15051"></a><a name="sparseTensorDenseAdd"></a><span class='hs-comment'>-- | Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.</span>
<a name="line-15052"></a><span class='hs-comment'>--</span>
<a name="line-15053"></a><span class='hs-comment'>-- This Op does not require `a_indices` be sorted in standard lexicographic order.</span>
<a name="line-15054"></a><span class='hs-definition'>sparseTensorDenseAdd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-15055"></a>                                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-15056"></a>                                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-15057"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-15058"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-15059"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-15060"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-15061"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-15062"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-15063"></a>                                                                 <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-15064"></a>                                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-15065"></a>                                                         <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-15066"></a>                                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-15067"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15068"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.</span>
<a name="line-15069"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a_values__: 1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`.</span>
<a name="line-15070"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __a_shape__: 1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`.</span>
<a name="line-15071"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __b__: `ndims`-D Tensor.  With shape `a_shape`.</span>
<a name="line-15072"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-15073"></a><span class='hs-definition'>sparseTensorDenseAdd</span> <span class='hs-varid'>a_indices</span> <span class='hs-varid'>a_values</span> <span class='hs-varid'>a_shape</span> <span class='hs-varid'>b</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15074"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseTensorDenseAdd"</span>
<a name="line-15075"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-15076"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15077"></a>        <span class='hs-varid'>a_indices</span> <span class='hs-varid'>a_values</span> <span class='hs-varid'>a_shape</span> <span class='hs-varid'>b</span>
<a name="line-15078"></a><span class='hs-comment'>{-
<a name="line-15079"></a>attr {
<a name="line-15080"></a>  allowed_values {
<a name="line-15081"></a>    list {
<a name="line-15082"></a>      type: DT_FLOAT
<a name="line-15083"></a>      type: DT_DOUBLE
<a name="line-15084"></a>      type: DT_INT64
<a name="line-15085"></a>      type: DT_INT32
<a name="line-15086"></a>      type: DT_UINT8
<a name="line-15087"></a>      type: DT_UINT16
<a name="line-15088"></a>      type: DT_INT16
<a name="line-15089"></a>      type: DT_INT8
<a name="line-15090"></a>      type: DT_COMPLEX64
<a name="line-15091"></a>      type: DT_COMPLEX128
<a name="line-15092"></a>      type: DT_QINT8
<a name="line-15093"></a>      type: DT_QUINT8
<a name="line-15094"></a>      type: DT_QINT32
<a name="line-15095"></a>      type: DT_HALF
<a name="line-15096"></a>    }
<a name="line-15097"></a>  }
<a name="line-15098"></a>  name: "T"
<a name="line-15099"></a>  type: "type"
<a name="line-15100"></a>}
<a name="line-15101"></a>attr {
<a name="line-15102"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-15103"></a>  name: "Tindices"
<a name="line-15104"></a>  type: "type"
<a name="line-15105"></a>}
<a name="line-15106"></a>input_arg {
<a name="line-15107"></a>  description: "2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`."
<a name="line-15108"></a>  name: "a_indices"
<a name="line-15109"></a>  type_attr: "Tindices"
<a name="line-15110"></a>}
<a name="line-15111"></a>input_arg {
<a name="line-15112"></a>  description: "1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`."
<a name="line-15113"></a>  name: "a_values"
<a name="line-15114"></a>  type_attr: "T"
<a name="line-15115"></a>}
<a name="line-15116"></a>input_arg {
<a name="line-15117"></a>  description: "1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`."
<a name="line-15118"></a>  name: "a_shape"
<a name="line-15119"></a>  type_attr: "Tindices"
<a name="line-15120"></a>}
<a name="line-15121"></a>input_arg {
<a name="line-15122"></a>  description: "`ndims`-D Tensor.  With shape `a_shape`."
<a name="line-15123"></a>  name: "b"
<a name="line-15124"></a>  type_attr: "T"
<a name="line-15125"></a>}
<a name="line-15126"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-15127"></a>-}</span>
<a name="line-15128"></a>
<a name="line-15129"></a><span class='hs-comment'>-- | Get the value of the tensor specified by its handle.</span>
<a name="line-15130"></a>
<a name="line-15131"></a><a name="getSessionTensor"></a><span class='hs-definition'>getSessionTensor</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15132"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle for a tensor stored in the session state.</span>
<a name="line-15133"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __value__: The tensor for the given handle.</span>
<a name="line-15134"></a><span class='hs-definition'>getSessionTensor</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15135"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"GetSessionTensor"</span>
<a name="line-15136"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15137"></a>        <span class='hs-varid'>handle</span>
<a name="line-15138"></a><span class='hs-comment'>{-
<a name="line-15139"></a>attr {
<a name="line-15140"></a>  description: "The type of the output value."
<a name="line-15141"></a>  name: "dtype"
<a name="line-15142"></a>  type: "type"
<a name="line-15143"></a>}
<a name="line-15144"></a>input_arg {
<a name="line-15145"></a>  description: "The handle for a tensor stored in the session state."
<a name="line-15146"></a>  name: "handle"
<a name="line-15147"></a>  type: DT_STRING
<a name="line-15148"></a>}
<a name="line-15149"></a>output_arg {
<a name="line-15150"></a>  description: "The tensor for the given handle."
<a name="line-15151"></a>  name: "value"
<a name="line-15152"></a>  type_attr: "dtype"
<a name="line-15153"></a>}
<a name="line-15154"></a>-}</span>
<a name="line-15155"></a>
<a name="line-15156"></a><a name="sparseReorder"></a><span class='hs-comment'>-- | Reorders a SparseTensor into the canonical, row-major ordering.</span>
<a name="line-15157"></a><span class='hs-comment'>--</span>
<a name="line-15158"></a><span class='hs-comment'>-- Note that by convention, all sparse ops preserve the canonical ordering along</span>
<a name="line-15159"></a><span class='hs-comment'>-- increasing dimension number. The only time ordering can be violated is during</span>
<a name="line-15160"></a><span class='hs-comment'>-- manual manipulation of the indices and values vectors to add entries.</span>
<a name="line-15161"></a><span class='hs-comment'>-- </span>
<a name="line-15162"></a><span class='hs-comment'>-- Reordering does not affect the shape of the SparseTensor.</span>
<a name="line-15163"></a><span class='hs-comment'>-- </span>
<a name="line-15164"></a><span class='hs-comment'>-- If the tensor has rank `R` and `N` non-empty values, `input_indices` has</span>
<a name="line-15165"></a><span class='hs-comment'>-- shape `[N, R]`, input_values has length `N`, and input_shape has length `R`.</span>
<a name="line-15166"></a><span class='hs-definition'>sparseReorder</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15167"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a</span>
<a name="line-15168"></a>                                          <span class='hs-comment'>-- SparseTensor, possibly not in canonical ordering.</span>
<a name="line-15169"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.</span>
<a name="line-15170"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.</span>
<a name="line-15171"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-15172"></a>                 <span class='hs-comment'>-- ^ (__output_indices__, __output_values__)</span>
<a name="line-15173"></a>                 <span class='hs-comment'>--</span>
<a name="line-15174"></a>                 <span class='hs-comment'>-- * __output_indices__: 2-D.  `N x R` matrix with the same indices as input_indices, but</span>
<a name="line-15175"></a>                 <span class='hs-comment'>-- in canonical row-major ordering.</span>
<a name="line-15176"></a>                 <span class='hs-comment'>--</span>
<a name="line-15177"></a>                 <span class='hs-comment'>-- * __output_values__: 1-D.  `N` non-empty values corresponding to `output_indices`.</span>
<a name="line-15178"></a><span class='hs-definition'>sparseReorder</span> <span class='hs-varid'>input_indices</span> <span class='hs-varid'>input_values</span> <span class='hs-varid'>input_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15179"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseReorder"</span>
<a name="line-15180"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15181"></a>        <span class='hs-varid'>input_indices</span> <span class='hs-varid'>input_values</span> <span class='hs-varid'>input_shape</span>
<a name="line-15182"></a><span class='hs-comment'>{-
<a name="line-15183"></a>attr { name: "T" type: "type" }
<a name="line-15184"></a>input_arg {
<a name="line-15185"></a>  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
<a name="line-15186"></a>  name: "input_indices"
<a name="line-15187"></a>  type: DT_INT64
<a name="line-15188"></a>}
<a name="line-15189"></a>input_arg {
<a name="line-15190"></a>  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
<a name="line-15191"></a>  name: "input_values"
<a name="line-15192"></a>  type_attr: "T"
<a name="line-15193"></a>}
<a name="line-15194"></a>input_arg {
<a name="line-15195"></a>  description: "1-D.  Shape of the input SparseTensor."
<a name="line-15196"></a>  name: "input_shape"
<a name="line-15197"></a>  type: DT_INT64
<a name="line-15198"></a>}
<a name="line-15199"></a>output_arg {
<a name="line-15200"></a>  description: "2-D.  `N x R` matrix with the same indices as input_indices, but\nin canonical row-major ordering."
<a name="line-15201"></a>  name: "output_indices"
<a name="line-15202"></a>  type: DT_INT64
<a name="line-15203"></a>}
<a name="line-15204"></a>output_arg {
<a name="line-15205"></a>  description: "1-D.  `N` non-empty values corresponding to `output_indices`."
<a name="line-15206"></a>  name: "output_values"
<a name="line-15207"></a>  type_attr: "T"
<a name="line-15208"></a>}
<a name="line-15209"></a>-}</span>
<a name="line-15210"></a>
<a name="line-15211"></a><a name="sparseSplit"></a><span class='hs-comment'>-- | Split a `SparseTensor` into `num_split` tensors along one dimension.</span>
<a name="line-15212"></a><span class='hs-comment'>--</span>
<a name="line-15213"></a><span class='hs-comment'>-- If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices</span>
<a name="line-15214"></a><span class='hs-comment'>-- `[0 : shape[split_dim] % num_split]` gets one extra dimension.</span>
<a name="line-15215"></a><span class='hs-comment'>-- For example, if `split_dim = 1` and `num_split = 2` and the input is</span>
<a name="line-15216"></a><span class='hs-comment'>-- </span>
<a name="line-15217"></a><span class='hs-comment'>--     input_tensor = shape = [2, 7]</span>
<a name="line-15218"></a><span class='hs-comment'>--     [    a   d e  ]</span>
<a name="line-15219"></a><span class='hs-comment'>--     [b c          ]</span>
<a name="line-15220"></a><span class='hs-comment'>-- </span>
<a name="line-15221"></a><span class='hs-comment'>-- Graphically the output tensors are:</span>
<a name="line-15222"></a><span class='hs-comment'>-- </span>
<a name="line-15223"></a><span class='hs-comment'>--     output_tensor[0] = shape = [2, 4]</span>
<a name="line-15224"></a><span class='hs-comment'>--     [    a  ]</span>
<a name="line-15225"></a><span class='hs-comment'>--     [b c    ]</span>
<a name="line-15226"></a><span class='hs-comment'>-- </span>
<a name="line-15227"></a><span class='hs-comment'>--     output_tensor[1] = shape = [2, 3]</span>
<a name="line-15228"></a><span class='hs-comment'>--     [ d e  ]</span>
<a name="line-15229"></a><span class='hs-comment'>--     [      ]</span>
<a name="line-15230"></a><span class='hs-definition'>sparseSplit</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15231"></a>               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_split__: The number of ways to split.</span>
<a name="line-15232"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range</span>
<a name="line-15233"></a>                                           <span class='hs-comment'>-- `[0, rank(shape))`.</span>
<a name="line-15234"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __indices__: 2-D tensor represents the indices of the sparse tensor.</span>
<a name="line-15235"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __values__: 1-D tensor represents the values of the sparse tensor.</span>
<a name="line-15236"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __shape__: 1-D. tensor represents the shape of the sparse tensor.</span>
<a name="line-15237"></a>                                           <span class='hs-comment'>-- output indices: A list of 1-D tensors represents the indices of the output</span>
<a name="line-15238"></a>                                           <span class='hs-comment'>-- sparse tensors.</span>
<a name="line-15239"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>,</span>
<a name="line-15240"></a>                   <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span>
<a name="line-15241"></a>               <span class='hs-comment'>-- ^ (__output_indices__, __output_values__, __output_shape__)</span>
<a name="line-15242"></a>               <span class='hs-comment'>--</span>
<a name="line-15243"></a>               <span class='hs-comment'>-- * __output_indices__</span>
<a name="line-15244"></a>               <span class='hs-comment'>--</span>
<a name="line-15245"></a>               <span class='hs-comment'>-- * __output_values__: A list of 1-D tensors represents the values of the output sparse</span>
<a name="line-15246"></a>               <span class='hs-comment'>-- tensors.</span>
<a name="line-15247"></a>               <span class='hs-comment'>--</span>
<a name="line-15248"></a>               <span class='hs-comment'>-- * __output_shape__: A list of 1-D tensors represents the shape of the output sparse</span>
<a name="line-15249"></a>               <span class='hs-comment'>-- tensors.</span>
<a name="line-15250"></a><span class='hs-definition'>sparseSplit</span> <span class='hs-varid'>num_split</span> <span class='hs-varid'>split_dim</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>values</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15251"></a>    <span class='hs-varid'>buildListOp</span> <span class='hs-keyglyph'>[</span><span class='hs-varid'>num_split</span><span class='hs-layout'>,</span> <span class='hs-varid'>num_split</span><span class='hs-layout'>,</span> <span class='hs-varid'>num_split</span><span class='hs-keyglyph'>]</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseSplit"</span>
<a name="line-15252"></a>                                                   <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-15253"></a>                                                   <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_split"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_split</span><span class='hs-layout'>)</span>
<a name="line-15254"></a>        <span class='hs-varid'>split_dim</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>values</span> <span class='hs-varid'>shape</span>
<a name="line-15255"></a><span class='hs-comment'>{-
<a name="line-15256"></a>attr {
<a name="line-15257"></a>  description: "The number of ways to split."
<a name="line-15258"></a>  has_minimum: true
<a name="line-15259"></a>  minimum: 1
<a name="line-15260"></a>  name: "num_split"
<a name="line-15261"></a>  type: "int"
<a name="line-15262"></a>}
<a name="line-15263"></a>attr { name: "T" type: "type" }
<a name="line-15264"></a>input_arg {
<a name="line-15265"></a>  description: "0-D.  The dimension along which to split.  Must be in the range\n`[0, rank(shape))`."
<a name="line-15266"></a>  name: "split_dim"
<a name="line-15267"></a>  type: DT_INT64
<a name="line-15268"></a>}
<a name="line-15269"></a>input_arg {
<a name="line-15270"></a>  description: "2-D tensor represents the indices of the sparse tensor."
<a name="line-15271"></a>  name: "indices"
<a name="line-15272"></a>  type: DT_INT64
<a name="line-15273"></a>}
<a name="line-15274"></a>input_arg {
<a name="line-15275"></a>  description: "1-D tensor represents the values of the sparse tensor."
<a name="line-15276"></a>  name: "values"
<a name="line-15277"></a>  type_attr: "T"
<a name="line-15278"></a>}
<a name="line-15279"></a>input_arg {
<a name="line-15280"></a>  description: "1-D. tensor represents the shape of the sparse tensor.\noutput indices: A list of 1-D tensors represents the indices of the output\nsparse tensors."
<a name="line-15281"></a>  name: "shape"
<a name="line-15282"></a>  type: DT_INT64
<a name="line-15283"></a>}
<a name="line-15284"></a>output_arg {
<a name="line-15285"></a>  name: "output_indices" number_attr: "num_split" type: DT_INT64
<a name="line-15286"></a>}
<a name="line-15287"></a>output_arg {
<a name="line-15288"></a>  description: "A list of 1-D tensors represents the values of the output sparse\ntensors."
<a name="line-15289"></a>  name: "output_values"
<a name="line-15290"></a>  number_attr: "num_split"
<a name="line-15291"></a>  type_attr: "T"
<a name="line-15292"></a>}
<a name="line-15293"></a>output_arg {
<a name="line-15294"></a>  description: "A list of 1-D tensors represents the shape of the output sparse\ntensors."
<a name="line-15295"></a>  name: "output_shape"
<a name="line-15296"></a>  number_attr: "num_split"
<a name="line-15297"></a>  type: DT_INT64
<a name="line-15298"></a>}
<a name="line-15299"></a>-}</span>
<a name="line-15300"></a>
<a name="line-15301"></a><a name="pad"></a><span class='hs-comment'>-- | Pads a tensor with zeros.</span>
<a name="line-15302"></a><span class='hs-comment'>--</span>
<a name="line-15303"></a><span class='hs-comment'>-- This operation pads a `input` with zeros according to the `paddings` you</span>
<a name="line-15304"></a><span class='hs-comment'>-- specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the</span>
<a name="line-15305"></a><span class='hs-comment'>-- rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates</span>
<a name="line-15306"></a><span class='hs-comment'>-- how many zeros to add before the contents of `input` in that dimension, and</span>
<a name="line-15307"></a><span class='hs-comment'>-- `paddings[D, 1]` indicates how many zeros to add after the contents of `input`</span>
<a name="line-15308"></a><span class='hs-comment'>-- in that dimension.</span>
<a name="line-15309"></a><span class='hs-comment'>-- </span>
<a name="line-15310"></a><span class='hs-comment'>-- The padded size of each dimension D of the output is:</span>
<a name="line-15311"></a><span class='hs-comment'>-- </span>
<a name="line-15312"></a><span class='hs-comment'>-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`</span>
<a name="line-15313"></a><span class='hs-comment'>-- </span>
<a name="line-15314"></a><span class='hs-comment'>-- For example:</span>
<a name="line-15315"></a><span class='hs-comment'>-- </span>
<a name="line-15316"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-15317"></a><span class='hs-comment'>-- # 't' is [[1, 1], [2, 2]]</span>
<a name="line-15318"></a><span class='hs-comment'>-- # 'paddings' is [[1, 1], [2, 2]]</span>
<a name="line-15319"></a><span class='hs-comment'>-- # rank of 't' is 2</span>
<a name="line-15320"></a><span class='hs-comment'>-- pad(t, paddings) ==&gt; [[0, 0, 0, 0, 0, 0]</span>
<a name="line-15321"></a><span class='hs-comment'>--                       [0, 0, 1, 1, 0, 0]</span>
<a name="line-15322"></a><span class='hs-comment'>--                       [0, 0, 2, 2, 0, 0]</span>
<a name="line-15323"></a><span class='hs-comment'>--                       [0, 0, 0, 0, 0, 0]]</span>
<a name="line-15324"></a><span class='hs-comment'>-- ```</span>
<a name="line-15325"></a><span class='hs-definition'>pad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tpaddings</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>,</span>
<a name="line-15326"></a>                                   <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-15327"></a>                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15328"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-15329"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tpaddings</span> <span class='hs-comment'>-- ^ __paddings__</span>
<a name="line-15330"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-15331"></a><span class='hs-definition'>pad</span> <span class='hs-varid'>input</span> <span class='hs-varid'>paddings</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15332"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Pad"</span>
<a name="line-15333"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-15334"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tpaddings"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15335"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>paddings</span>
<a name="line-15336"></a><span class='hs-comment'>{-
<a name="line-15337"></a>attr { name: "T" type: "type" }
<a name="line-15338"></a>attr {
<a name="line-15339"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-15340"></a>  default_value { type: DT_INT32 }
<a name="line-15341"></a>  name: "Tpaddings"
<a name="line-15342"></a>  type: "type"
<a name="line-15343"></a>}
<a name="line-15344"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-15345"></a>input_arg { name: "paddings" type_attr: "Tpaddings" }
<a name="line-15346"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-15347"></a>-}</span>
<a name="line-15348"></a>
<a name="line-15349"></a><a name="sparseToDense"></a><span class='hs-comment'>-- | Converts a sparse representation into a dense tensor.</span>
<a name="line-15350"></a><span class='hs-comment'>--</span>
<a name="line-15351"></a><span class='hs-comment'>-- Builds an array `dense` with shape `output_shape` such that</span>
<a name="line-15352"></a><span class='hs-comment'>-- </span>
<a name="line-15353"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-15354"></a><span class='hs-comment'>-- # If sparse_indices is scalar</span>
<a name="line-15355"></a><span class='hs-comment'>-- dense[i] = (i == sparse_indices ? sparse_values : default_value)</span>
<a name="line-15356"></a><span class='hs-comment'>-- </span>
<a name="line-15357"></a><span class='hs-comment'>-- # If sparse_indices is a vector, then for each i</span>
<a name="line-15358"></a><span class='hs-comment'>-- dense[sparse_indices[i]] = sparse_values[i]</span>
<a name="line-15359"></a><span class='hs-comment'>-- </span>
<a name="line-15360"></a><span class='hs-comment'>-- # If sparse_indices is an n by d matrix, then for each i in [0, n)</span>
<a name="line-15361"></a><span class='hs-comment'>-- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]</span>
<a name="line-15362"></a><span class='hs-comment'>-- ```</span>
<a name="line-15363"></a><span class='hs-comment'>-- </span>
<a name="line-15364"></a><span class='hs-comment'>-- All other values in `dense` are set to `default_value`.  If `sparse_values` is a</span>
<a name="line-15365"></a><span class='hs-comment'>-- scalar, all sparse indices are set to this single value.</span>
<a name="line-15366"></a><span class='hs-comment'>-- </span>
<a name="line-15367"></a><span class='hs-comment'>-- Indices should be sorted in lexicographic order, and indices must not</span>
<a name="line-15368"></a><span class='hs-comment'>-- contain any repeats. If `validate_indices` is true, these properties</span>
<a name="line-15369"></a><span class='hs-comment'>-- are checked during execution.</span>
<a name="line-15370"></a><span class='hs-definition'>sparseToDense</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-15371"></a>                                                  <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-15372"></a>                                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-15373"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15374"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __sparse_indices__: 0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete</span>
<a name="line-15375"></a>                                    <span class='hs-comment'>-- index where `sparse_values[i]` will be placed.</span>
<a name="line-15376"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __output_shape__: 1-D.  Shape of the dense output tensor.</span>
<a name="line-15377"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __sparse_values__: 1-D.  Values corresponding to each row of `sparse_indices`,</span>
<a name="line-15378"></a>                                <span class='hs-comment'>-- or a scalar value to be used for all sparse indices.</span>
<a name="line-15379"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __default_value__: Scalar value to set for indices not specified in</span>
<a name="line-15380"></a>                                <span class='hs-comment'>-- `sparse_indices`.</span>
<a name="line-15381"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __dense__: Dense output tensor of shape `output_shape`.</span>
<a name="line-15382"></a><span class='hs-definition'>sparseToDense</span> <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>output_shape</span> <span class='hs-varid'>sparse_values</span>
<a name="line-15383"></a>              <span class='hs-varid'>default_value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15384"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseToDense"</span>
<a name="line-15385"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-15386"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15387"></a>        <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>output_shape</span> <span class='hs-varid'>sparse_values</span> <span class='hs-varid'>default_value</span>
<a name="line-15388"></a><span class='hs-comment'>{-
<a name="line-15389"></a>attr {
<a name="line-15390"></a>  default_value { b: true }
<a name="line-15391"></a>  description: "If true, indices are checked to make sure they are sorted in\nlexicographic order and that there are no repeats."
<a name="line-15392"></a>  name: "validate_indices"
<a name="line-15393"></a>  type: "bool"
<a name="line-15394"></a>}
<a name="line-15395"></a>attr { name: "T" type: "type" }
<a name="line-15396"></a>attr {
<a name="line-15397"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-15398"></a>  name: "Tindices"
<a name="line-15399"></a>  type: "type"
<a name="line-15400"></a>}
<a name="line-15401"></a>input_arg {
<a name="line-15402"></a>  description: "0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete\nindex where `sparse_values[i]` will be placed."
<a name="line-15403"></a>  name: "sparse_indices"
<a name="line-15404"></a>  type_attr: "Tindices"
<a name="line-15405"></a>}
<a name="line-15406"></a>input_arg {
<a name="line-15407"></a>  description: "1-D.  Shape of the dense output tensor."
<a name="line-15408"></a>  name: "output_shape"
<a name="line-15409"></a>  type_attr: "Tindices"
<a name="line-15410"></a>}
<a name="line-15411"></a>input_arg {
<a name="line-15412"></a>  description: "1-D.  Values corresponding to each row of `sparse_indices`,\nor a scalar value to be used for all sparse indices."
<a name="line-15413"></a>  name: "sparse_values"
<a name="line-15414"></a>  type_attr: "T"
<a name="line-15415"></a>}
<a name="line-15416"></a>input_arg {
<a name="line-15417"></a>  description: "Scalar value to set for indices not specified in\n`sparse_indices`."
<a name="line-15418"></a>  name: "default_value"
<a name="line-15419"></a>  type_attr: "T"
<a name="line-15420"></a>}
<a name="line-15421"></a>output_arg {
<a name="line-15422"></a>  description: "Dense output tensor of shape `output_shape`."
<a name="line-15423"></a>  name: "dense"
<a name="line-15424"></a>  type_attr: "T"
<a name="line-15425"></a>}
<a name="line-15426"></a>-}</span>
<a name="line-15427"></a>
<a name="line-15428"></a><a name="sparseTensorDenseMatMul"></a><span class='hs-comment'>-- | Multiply SparseTensor (of rank 2) "A" by dense matrix "B".</span>
<a name="line-15429"></a><span class='hs-comment'>--</span>
<a name="line-15430"></a><span class='hs-comment'>-- No validity checking is performed on the indices of A.  However, the following</span>
<a name="line-15431"></a><span class='hs-comment'>-- input format is recommended for optimal behavior:</span>
<a name="line-15432"></a><span class='hs-comment'>-- </span>
<a name="line-15433"></a><span class='hs-comment'>-- if adjoint_a == false:</span>
<a name="line-15434"></a><span class='hs-comment'>--   A should be sorted in lexicographically increasing order.  Use SparseReorder</span>
<a name="line-15435"></a><span class='hs-comment'>--   if you're not sure.</span>
<a name="line-15436"></a><span class='hs-comment'>-- if adjoint_a == true:</span>
<a name="line-15437"></a><span class='hs-comment'>--   A should be sorted in order of increasing dimension 1 (i.e., "column major"</span>
<a name="line-15438"></a><span class='hs-comment'>--   order instead of "row major" order).</span>
<a name="line-15439"></a><span class='hs-definition'>sparseTensorDenseMatMul</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15440"></a>                           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.</span>
<a name="line-15441"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a_values__: 1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.</span>
<a name="line-15442"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __a_shape__: 1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.</span>
<a name="line-15443"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __b__: 2-D.  A dense Matrix.</span>
<a name="line-15444"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __product__</span>
<a name="line-15445"></a><span class='hs-definition'>sparseTensorDenseMatMul</span> <span class='hs-varid'>a_indices</span> <span class='hs-varid'>a_values</span> <span class='hs-varid'>a_shape</span> <span class='hs-varid'>b</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15446"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseTensorDenseMatMul"</span>
<a name="line-15447"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15448"></a>        <span class='hs-varid'>a_indices</span> <span class='hs-varid'>a_values</span> <span class='hs-varid'>a_shape</span> <span class='hs-varid'>b</span>
<a name="line-15449"></a><span class='hs-comment'>{-
<a name="line-15450"></a>attr { name: "T" type: "type" }
<a name="line-15451"></a>attr {
<a name="line-15452"></a>  default_value { b: false }
<a name="line-15453"></a>  description: "Use the adjoint of A in the matrix multiply.  If A is complex, this\nis transpose(conj(A)).  Otherwise it\'s transpose(A)."
<a name="line-15454"></a>  name: "adjoint_a"
<a name="line-15455"></a>  type: "bool"
<a name="line-15456"></a>}
<a name="line-15457"></a>attr {
<a name="line-15458"></a>  default_value { b: false }
<a name="line-15459"></a>  description: "Use the adjoint of B in the matrix multiply.  If B is complex, this\nis transpose(conj(B)).  Otherwise it\'s transpose(B)."
<a name="line-15460"></a>  name: "adjoint_b"
<a name="line-15461"></a>  type: "bool"
<a name="line-15462"></a>}
<a name="line-15463"></a>input_arg {
<a name="line-15464"></a>  description: "2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix."
<a name="line-15465"></a>  name: "a_indices"
<a name="line-15466"></a>  type: DT_INT64
<a name="line-15467"></a>}
<a name="line-15468"></a>input_arg {
<a name="line-15469"></a>  description: "1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector."
<a name="line-15470"></a>  name: "a_values"
<a name="line-15471"></a>  type_attr: "T"
<a name="line-15472"></a>}
<a name="line-15473"></a>input_arg {
<a name="line-15474"></a>  description: "1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector."
<a name="line-15475"></a>  name: "a_shape"
<a name="line-15476"></a>  type: DT_INT64
<a name="line-15477"></a>}
<a name="line-15478"></a>input_arg {
<a name="line-15479"></a>  description: "2-D.  A dense Matrix." name: "b" type_attr: "T"
<a name="line-15480"></a>}
<a name="line-15481"></a>output_arg { name: "product" type_attr: "T" }
<a name="line-15482"></a>-}</span>
<a name="line-15483"></a>
<a name="line-15484"></a><a name="mirrorPadGrad"></a><span class='hs-comment'>-- | Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.</span>
<a name="line-15485"></a><span class='hs-comment'>--</span>
<a name="line-15486"></a><span class='hs-comment'>-- This operation folds the padded areas of `input` by `MirrorPad` according to the</span>
<a name="line-15487"></a><span class='hs-comment'>-- `paddings` you specify. `paddings` must be the same as `paddings` argument</span>
<a name="line-15488"></a><span class='hs-comment'>-- given to the corresponding `MirrorPad` op.</span>
<a name="line-15489"></a><span class='hs-comment'>-- </span>
<a name="line-15490"></a><span class='hs-comment'>-- The folded size of each dimension D of the output is:</span>
<a name="line-15491"></a><span class='hs-comment'>-- </span>
<a name="line-15492"></a><span class='hs-comment'>-- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`</span>
<a name="line-15493"></a><span class='hs-comment'>-- </span>
<a name="line-15494"></a><span class='hs-comment'>-- For example:</span>
<a name="line-15495"></a><span class='hs-comment'>-- </span>
<a name="line-15496"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-15497"></a><span class='hs-comment'>-- # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].</span>
<a name="line-15498"></a><span class='hs-comment'>-- # 'paddings' is [[0, 1]], [0, 1]].</span>
<a name="line-15499"></a><span class='hs-comment'>-- # 'mode' is SYMMETRIC.</span>
<a name="line-15500"></a><span class='hs-comment'>-- # rank of 't' is 2.</span>
<a name="line-15501"></a><span class='hs-comment'>-- pad(t, paddings) ==&gt; [[ 1,  5]</span>
<a name="line-15502"></a><span class='hs-comment'>--                       [11, 28]]</span>
<a name="line-15503"></a><span class='hs-comment'>-- ```</span>
<a name="line-15504"></a><span class='hs-definition'>mirrorPadGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tpaddings</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>,</span>
<a name="line-15505"></a>                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-15506"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15507"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The input tensor to be folded.</span>
<a name="line-15508"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tpaddings</span> <span class='hs-comment'>-- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of</span>
<a name="line-15509"></a>                                        <span class='hs-comment'>-- rows must be the same as the rank of `input`.</span>
<a name="line-15510"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The folded tensor.</span>
<a name="line-15511"></a><span class='hs-definition'>mirrorPadGrad</span> <span class='hs-varid'>input</span> <span class='hs-varid'>paddings</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15512"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MirrorPadGrad"</span>
<a name="line-15513"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-15514"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tpaddings"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tpaddings</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15515"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>paddings</span>
<a name="line-15516"></a><span class='hs-comment'>{-
<a name="line-15517"></a>attr { name: "T" type: "type" }
<a name="line-15518"></a>attr {
<a name="line-15519"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-15520"></a>  default_value { type: DT_INT32 }
<a name="line-15521"></a>  name: "Tpaddings"
<a name="line-15522"></a>  type: "type"
<a name="line-15523"></a>}
<a name="line-15524"></a>attr {
<a name="line-15525"></a>  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
<a name="line-15526"></a>  description: "The mode used in the `MirrorPad` op."
<a name="line-15527"></a>  name: "mode"
<a name="line-15528"></a>  type: "string"
<a name="line-15529"></a>}
<a name="line-15530"></a>input_arg {
<a name="line-15531"></a>  description: "The input tensor to be folded."
<a name="line-15532"></a>  name: "input"
<a name="line-15533"></a>  type_attr: "T"
<a name="line-15534"></a>}
<a name="line-15535"></a>input_arg {
<a name="line-15536"></a>  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
<a name="line-15537"></a>  name: "paddings"
<a name="line-15538"></a>  type_attr: "Tpaddings"
<a name="line-15539"></a>}
<a name="line-15540"></a>output_arg {
<a name="line-15541"></a>  description: "The folded tensor." name: "output" type_attr: "T"
<a name="line-15542"></a>}
<a name="line-15543"></a>-}</span>
<a name="line-15544"></a>
<a name="line-15545"></a><a name="randomShuffle"></a><span class='hs-comment'>-- | Randomly shuffles a tensor along its first dimension.</span>
<a name="line-15546"></a><span class='hs-comment'>--</span>
<a name="line-15547"></a><span class='hs-comment'>--   The tensor is shuffled along dimension 0, such that each `value[j]` is mapped</span>
<a name="line-15548"></a><span class='hs-comment'>--   to one and only one `output[i]`. For example, a mapping that might occur for a</span>
<a name="line-15549"></a><span class='hs-comment'>--   3x2 tensor is:</span>
<a name="line-15550"></a><span class='hs-comment'>-- </span>
<a name="line-15551"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-15552"></a><span class='hs-comment'>-- [[1, 2],       [[5, 6],</span>
<a name="line-15553"></a><span class='hs-comment'>--  [3, 4],  ==&gt;   [1, 2],</span>
<a name="line-15554"></a><span class='hs-comment'>--  [5, 6]]        [3, 4]]</span>
<a name="line-15555"></a><span class='hs-comment'>-- ```</span>
<a name="line-15556"></a><span class='hs-definition'>randomShuffle</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15557"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: The tensor to be shuffled.</span>
<a name="line-15558"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A tensor of same shape and type as `value`, shuffled along its first</span>
<a name="line-15559"></a>                 <span class='hs-comment'>-- dimension.</span>
<a name="line-15560"></a><span class='hs-definition'>randomShuffle</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15561"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RandomShuffle"</span>
<a name="line-15562"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15563"></a>        <span class='hs-varid'>value</span>
<a name="line-15564"></a><span class='hs-comment'>{-
<a name="line-15565"></a>attr {
<a name="line-15566"></a>  default_value { i: 0 }
<a name="line-15567"></a>  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-15568"></a>  name: "seed"
<a name="line-15569"></a>  type: "int"
<a name="line-15570"></a>}
<a name="line-15571"></a>attr {
<a name="line-15572"></a>  default_value { i: 0 }
<a name="line-15573"></a>  description: "A second seed to avoid seed collision."
<a name="line-15574"></a>  name: "seed2"
<a name="line-15575"></a>  type: "int"
<a name="line-15576"></a>}
<a name="line-15577"></a>attr { name: "T" type: "type" }
<a name="line-15578"></a>input_arg {
<a name="line-15579"></a>  description: "The tensor to be shuffled."
<a name="line-15580"></a>  name: "value"
<a name="line-15581"></a>  type_attr: "T"
<a name="line-15582"></a>}
<a name="line-15583"></a>output_arg {
<a name="line-15584"></a>  description: "A tensor of same shape and type as `value`, shuffled along its first\ndimension."
<a name="line-15585"></a>  name: "output"
<a name="line-15586"></a>  type_attr: "T"
<a name="line-15587"></a>}
<a name="line-15588"></a>-}</span>
<a name="line-15589"></a>
<a name="line-15590"></a><a name="select"></a><span class='hs-comment'>-- | Selects elements from `t` or `e`, depending on `condition`.</span>
<a name="line-15591"></a><span class='hs-comment'>--</span>
<a name="line-15592"></a><span class='hs-comment'>-- The `t`, and `e` tensors must all have the same shape, and the</span>
<a name="line-15593"></a><span class='hs-comment'>-- output will also have that shape.</span>
<a name="line-15594"></a><span class='hs-comment'>-- </span>
<a name="line-15595"></a><span class='hs-comment'>-- The `condition` tensor must be a scalar if `t` and `e` are scalars.</span>
<a name="line-15596"></a><span class='hs-comment'>-- If `t` and `e` are vectors or higher rank, then `condition` must be either a</span>
<a name="line-15597"></a><span class='hs-comment'>-- scalar, a vector with size matching the first dimension of `t`, or must have</span>
<a name="line-15598"></a><span class='hs-comment'>-- the same shape as `t`.</span>
<a name="line-15599"></a><span class='hs-comment'>-- </span>
<a name="line-15600"></a><span class='hs-comment'>-- The `condition` tensor acts as a mask that chooses, based on the value at each</span>
<a name="line-15601"></a><span class='hs-comment'>-- element, whether the corresponding element / row in the output should be</span>
<a name="line-15602"></a><span class='hs-comment'>-- taken from `t` (if true) or `e` (if false).</span>
<a name="line-15603"></a><span class='hs-comment'>-- </span>
<a name="line-15604"></a><span class='hs-comment'>-- If `condition` is a vector and `t` and `e` are higher rank matrices, then</span>
<a name="line-15605"></a><span class='hs-comment'>-- it chooses which row (outer dimension) to copy from `t` and `e`.</span>
<a name="line-15606"></a><span class='hs-comment'>-- If `condition` has the same shape as `t` and `e`, then it chooses which</span>
<a name="line-15607"></a><span class='hs-comment'>-- element to copy from `t` and `e`.</span>
<a name="line-15608"></a><span class='hs-comment'>-- </span>
<a name="line-15609"></a><span class='hs-comment'>-- For example:</span>
<a name="line-15610"></a><span class='hs-comment'>-- </span>
<a name="line-15611"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-15612"></a><span class='hs-comment'>-- # 'condition' tensor is [[True,  False]</span>
<a name="line-15613"></a><span class='hs-comment'>-- #                        [False, True]]</span>
<a name="line-15614"></a><span class='hs-comment'>-- # 't' is [[1, 2],</span>
<a name="line-15615"></a><span class='hs-comment'>-- #         [3, 4]]</span>
<a name="line-15616"></a><span class='hs-comment'>-- # 'e' is [[5, 6],</span>
<a name="line-15617"></a><span class='hs-comment'>-- #         [7, 8]]</span>
<a name="line-15618"></a><span class='hs-comment'>-- select(condition, t, e) ==&gt; [[1, 6],</span>
<a name="line-15619"></a><span class='hs-comment'>--                              [7, 4]]</span>
<a name="line-15620"></a><span class='hs-comment'>-- </span>
<a name="line-15621"></a><span class='hs-comment'>-- </span>
<a name="line-15622"></a><span class='hs-comment'>-- # 'condition' tensor is [True, False]</span>
<a name="line-15623"></a><span class='hs-comment'>-- # 't' is [[1, 2],</span>
<a name="line-15624"></a><span class='hs-comment'>-- #         [3, 4]]</span>
<a name="line-15625"></a><span class='hs-comment'>-- # 'e' is [[5, 6],</span>
<a name="line-15626"></a><span class='hs-comment'>-- #         [7, 8]]</span>
<a name="line-15627"></a><span class='hs-comment'>-- select(condition, t, e) ==&gt; [[1, 2],</span>
<a name="line-15628"></a><span class='hs-comment'>--                              [7, 8]]</span>
<a name="line-15629"></a><span class='hs-comment'>-- </span>
<a name="line-15630"></a><span class='hs-comment'>-- ```</span>
<a name="line-15631"></a><span class='hs-definition'>select</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15632"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __condition__</span>
<a name="line-15633"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __t__: = A `Tensor` which may have the same shape as `condition`.</span>
<a name="line-15634"></a>                         <span class='hs-comment'>-- If `condition` is rank 1, `t` may have higher rank,</span>
<a name="line-15635"></a>                         <span class='hs-comment'>-- but its first dimension must match the size of `condition`.</span>
<a name="line-15636"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __e__: = A `Tensor` with the same type and shape as `t`.</span>
<a name="line-15637"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: = A `Tensor` with the same type and shape as `t` and `e`.</span>
<a name="line-15638"></a><span class='hs-definition'>select</span> <span class='hs-varid'>condition</span> <span class='hs-varid'>t</span> <span class='hs-varid'>e</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15639"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Select"</span>
<a name="line-15640"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15641"></a>        <span class='hs-varid'>condition</span> <span class='hs-varid'>t</span> <span class='hs-varid'>e</span>
<a name="line-15642"></a><span class='hs-comment'>{-
<a name="line-15643"></a>attr { name: "T" type: "type" }
<a name="line-15644"></a>input_arg { name: "condition" type: DT_BOOL }
<a name="line-15645"></a>input_arg {
<a name="line-15646"></a>  description: "= A `Tensor` which may have the same shape as `condition`.\nIf `condition` is rank 1, `t` may have higher rank,\nbut its first dimension must match the size of `condition`."
<a name="line-15647"></a>  name: "t"
<a name="line-15648"></a>  type_attr: "T"
<a name="line-15649"></a>}
<a name="line-15650"></a>input_arg {
<a name="line-15651"></a>  description: "= A `Tensor` with the same type and shape as `t`."
<a name="line-15652"></a>  name: "e"
<a name="line-15653"></a>  type_attr: "T"
<a name="line-15654"></a>}
<a name="line-15655"></a>output_arg {
<a name="line-15656"></a>  description: "= A `Tensor` with the same type and shape as `t` and `e`."
<a name="line-15657"></a>  name: "output"
<a name="line-15658"></a>  type_attr: "T"
<a name="line-15659"></a>}
<a name="line-15660"></a>-}</span>
<a name="line-15661"></a>
<a name="line-15662"></a><a name="sparseAddGrad"></a><span class='hs-comment'>-- | The gradient operator for the SparseAdd op.</span>
<a name="line-15663"></a><span class='hs-comment'>--</span>
<a name="line-15664"></a><span class='hs-comment'>-- The SparseAdd op calculates A + B, where A, B, and the sum are all represented</span>
<a name="line-15665"></a><span class='hs-comment'>-- as `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.</span>
<a name="line-15666"></a><span class='hs-comment'>-- non-empty values of the sum, and outputs the gradients w.r.t. the non-empty</span>
<a name="line-15667"></a><span class='hs-comment'>-- values of A and B.</span>
<a name="line-15668"></a><span class='hs-definition'>sparseAddGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-15669"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-15670"></a>                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-15671"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-15672"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-15673"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-15674"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-15675"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15676"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __backprop_val_grad__: 1-D with shape `[nnz(sum)]`.  The gradient with respect to</span>
<a name="line-15677"></a>                             <span class='hs-comment'>-- the non-empty values of the sum.</span>
<a name="line-15678"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.</span>
<a name="line-15679"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __b_indices__: 2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.</span>
<a name="line-15680"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sum_indices__: 2-D.  The `indices` of the sum `SparseTensor`, size</span>
<a name="line-15681"></a>                                             <span class='hs-comment'>-- `[nnz(sum), ndims]`.</span>
<a name="line-15682"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-15683"></a>                 <span class='hs-comment'>-- ^ (__a_val_grad__, __b_val_grad__)</span>
<a name="line-15684"></a>                 <span class='hs-comment'>--</span>
<a name="line-15685"></a>                 <span class='hs-comment'>-- * __a_val_grad__: 1-D with shape `[nnz(A)]`. The gradient with respect to the</span>
<a name="line-15686"></a>                 <span class='hs-comment'>-- non-empty values of A.</span>
<a name="line-15687"></a>                 <span class='hs-comment'>--</span>
<a name="line-15688"></a>                 <span class='hs-comment'>-- * __b_val_grad__: 1-D with shape `[nnz(B)]`. The gradient with respect to the</span>
<a name="line-15689"></a>                 <span class='hs-comment'>-- non-empty values of B.</span>
<a name="line-15690"></a><span class='hs-definition'>sparseAddGrad</span> <span class='hs-varid'>backprop_val_grad</span> <span class='hs-varid'>a_indices</span> <span class='hs-varid'>b_indices</span>
<a name="line-15691"></a>              <span class='hs-varid'>sum_indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15692"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseAddGrad"</span>
<a name="line-15693"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15694"></a>        <span class='hs-varid'>backprop_val_grad</span> <span class='hs-varid'>a_indices</span> <span class='hs-varid'>b_indices</span> <span class='hs-varid'>sum_indices</span>
<a name="line-15695"></a><span class='hs-comment'>{-
<a name="line-15696"></a>attr {
<a name="line-15697"></a>  allowed_values {
<a name="line-15698"></a>    list {
<a name="line-15699"></a>      type: DT_FLOAT
<a name="line-15700"></a>      type: DT_DOUBLE
<a name="line-15701"></a>      type: DT_INT64
<a name="line-15702"></a>      type: DT_INT32
<a name="line-15703"></a>      type: DT_UINT8
<a name="line-15704"></a>      type: DT_UINT16
<a name="line-15705"></a>      type: DT_INT16
<a name="line-15706"></a>      type: DT_INT8
<a name="line-15707"></a>      type: DT_COMPLEX64
<a name="line-15708"></a>      type: DT_COMPLEX128
<a name="line-15709"></a>      type: DT_QINT8
<a name="line-15710"></a>      type: DT_QUINT8
<a name="line-15711"></a>      type: DT_QINT32
<a name="line-15712"></a>      type: DT_HALF
<a name="line-15713"></a>    }
<a name="line-15714"></a>  }
<a name="line-15715"></a>  name: "T"
<a name="line-15716"></a>  type: "type"
<a name="line-15717"></a>}
<a name="line-15718"></a>input_arg {
<a name="line-15719"></a>  description: "1-D with shape `[nnz(sum)]`.  The gradient with respect to\nthe non-empty values of the sum."
<a name="line-15720"></a>  name: "backprop_val_grad"
<a name="line-15721"></a>  type_attr: "T"
<a name="line-15722"></a>}
<a name="line-15723"></a>input_arg {
<a name="line-15724"></a>  description: "2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`."
<a name="line-15725"></a>  name: "a_indices"
<a name="line-15726"></a>  type: DT_INT64
<a name="line-15727"></a>}
<a name="line-15728"></a>input_arg {
<a name="line-15729"></a>  description: "2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`."
<a name="line-15730"></a>  name: "b_indices"
<a name="line-15731"></a>  type: DT_INT64
<a name="line-15732"></a>}
<a name="line-15733"></a>input_arg {
<a name="line-15734"></a>  description: "2-D.  The `indices` of the sum `SparseTensor`, size\n`[nnz(sum), ndims]`."
<a name="line-15735"></a>  name: "sum_indices"
<a name="line-15736"></a>  type: DT_INT64
<a name="line-15737"></a>}
<a name="line-15738"></a>output_arg {
<a name="line-15739"></a>  description: "1-D with shape `[nnz(A)]`. The gradient with respect to the\nnon-empty values of A."
<a name="line-15740"></a>  name: "a_val_grad"
<a name="line-15741"></a>  type_attr: "T"
<a name="line-15742"></a>}
<a name="line-15743"></a>output_arg {
<a name="line-15744"></a>  description: "1-D with shape `[nnz(B)]`. The gradient with respect to the\nnon-empty values of B."
<a name="line-15745"></a>  name: "b_val_grad"
<a name="line-15746"></a>  type_attr: "T"
<a name="line-15747"></a>}
<a name="line-15748"></a>-}</span>
<a name="line-15749"></a>
<a name="line-15750"></a><span class='hs-comment'>-- | Computes fingerprints of the input strings.</span>
<a name="line-15751"></a>
<a name="line-15752"></a><a name="sdcaFprint"></a><span class='hs-definition'>sdcaFprint</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __input__: vector of strings to compute fingerprints on.</span>
<a name="line-15753"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __output__: a (N,2) shaped matrix where N is the number of elements in the input</span>
<a name="line-15754"></a>              <span class='hs-comment'>-- vector. Each row contains the low and high parts of the fingerprint.</span>
<a name="line-15755"></a><span class='hs-definition'>sdcaFprint</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15756"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SdcaFprint"</span><span class='hs-layout'>)</span>
<a name="line-15757"></a>        <span class='hs-varid'>input</span>
<a name="line-15758"></a><span class='hs-comment'>{-
<a name="line-15759"></a>input_arg {
<a name="line-15760"></a>  description: "vector of strings to compute fingerprints on."
<a name="line-15761"></a>  name: "input"
<a name="line-15762"></a>  type: DT_STRING
<a name="line-15763"></a>}
<a name="line-15764"></a>output_arg {
<a name="line-15765"></a>  description: "a (N,2) shaped matrix where N is the number of elements in the input\nvector. Each row contains the low and high parts of the fingerprint."
<a name="line-15766"></a>  name: "output"
<a name="line-15767"></a>  type: DT_INT64
<a name="line-15768"></a>}
<a name="line-15769"></a>-}</span>
<a name="line-15770"></a>
<a name="line-15771"></a><span class='hs-comment'>-- | </span>
<a name="line-15772"></a>
<a name="line-15773"></a><a name="tensorArrayUnpack"></a><span class='hs-definition'>tensorArrayUnpack</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15774"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-15775"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__</span>
<a name="line-15776"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__</span>
<a name="line-15777"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __flow_out__</span>
<a name="line-15778"></a><span class='hs-definition'>tensorArrayUnpack</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>value</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15779"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayUnpack"</span>
<a name="line-15780"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15781"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>value</span> <span class='hs-varid'>flow_in</span>
<a name="line-15782"></a><span class='hs-comment'>{-
<a name="line-15783"></a>attr { name: "T" type: "type" }
<a name="line-15784"></a>input_arg { is_ref: true name: "handle" type: DT_STRING }
<a name="line-15785"></a>input_arg { name: "value" type_attr: "T" }
<a name="line-15786"></a>input_arg { name: "flow_in" type: DT_FLOAT }
<a name="line-15787"></a>output_arg { name: "flow_out" type: DT_FLOAT }
<a name="line-15788"></a>-}</span>
<a name="line-15789"></a>
<a name="line-15790"></a><span class='hs-comment'>-- | Produces the average pool of the input tensor for quantized types.</span>
<a name="line-15791"></a>
<a name="line-15792"></a><a name="quantizedAvgPool"></a><span class='hs-definition'>quantizedAvgPool</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-15793"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-15794"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-15795"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15796"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-15797"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_input__: The float value that the lowest quantized input value represents.</span>
<a name="line-15798"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_input__: The float value that the highest quantized input value represents.</span>
<a name="line-15799"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-15800"></a>                    <span class='hs-comment'>-- ^ (__output__, __min_output__, __max_output__)</span>
<a name="line-15801"></a>                    <span class='hs-comment'>--</span>
<a name="line-15802"></a>                    <span class='hs-comment'>-- * __output__</span>
<a name="line-15803"></a>                    <span class='hs-comment'>--</span>
<a name="line-15804"></a>                    <span class='hs-comment'>-- * __min_output__: The float value that the lowest quantized output value represents.</span>
<a name="line-15805"></a>                    <span class='hs-comment'>--</span>
<a name="line-15806"></a>                    <span class='hs-comment'>-- * __max_output__: The float value that the highest quantized output value represents.</span>
<a name="line-15807"></a><span class='hs-definition'>quantizedAvgPool</span> <span class='hs-varid'>input</span> <span class='hs-varid'>min_input</span> <span class='hs-varid'>max_input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15808"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedAvgPool"</span>
<a name="line-15809"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15810"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>min_input</span> <span class='hs-varid'>max_input</span>
<a name="line-15811"></a><span class='hs-comment'>{-
<a name="line-15812"></a>attr {
<a name="line-15813"></a>  allowed_values {
<a name="line-15814"></a>    list {
<a name="line-15815"></a>      type: DT_QINT8
<a name="line-15816"></a>      type: DT_QUINT8
<a name="line-15817"></a>      type: DT_QINT16
<a name="line-15818"></a>      type: DT_QUINT16
<a name="line-15819"></a>      type: DT_QINT32
<a name="line-15820"></a>    }
<a name="line-15821"></a>  }
<a name="line-15822"></a>  name: "T"
<a name="line-15823"></a>  type: "type"
<a name="line-15824"></a>}
<a name="line-15825"></a>attr {
<a name="line-15826"></a>  description: "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input."
<a name="line-15827"></a>  name: "ksize"
<a name="line-15828"></a>  type: "list(int)"
<a name="line-15829"></a>}
<a name="line-15830"></a>attr {
<a name="line-15831"></a>  description: "The stride of the sliding window for each dimension of the input\ntensor.  The length must be 4 to match the number of dimensions of the input."
<a name="line-15832"></a>  name: "strides"
<a name="line-15833"></a>  type: "list(int)"
<a name="line-15834"></a>}
<a name="line-15835"></a>attr {
<a name="line-15836"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-15837"></a>  description: "The type of padding algorithm to use."
<a name="line-15838"></a>  name: "padding"
<a name="line-15839"></a>  type: "string"
<a name="line-15840"></a>}
<a name="line-15841"></a>input_arg {
<a name="line-15842"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-15843"></a>  name: "input"
<a name="line-15844"></a>  type_attr: "T"
<a name="line-15845"></a>}
<a name="line-15846"></a>input_arg {
<a name="line-15847"></a>  description: "The float value that the lowest quantized input value represents."
<a name="line-15848"></a>  name: "min_input"
<a name="line-15849"></a>  type: DT_FLOAT
<a name="line-15850"></a>}
<a name="line-15851"></a>input_arg {
<a name="line-15852"></a>  description: "The float value that the highest quantized input value represents."
<a name="line-15853"></a>  name: "max_input"
<a name="line-15854"></a>  type: DT_FLOAT
<a name="line-15855"></a>}
<a name="line-15856"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-15857"></a>output_arg {
<a name="line-15858"></a>  description: "The float value that the lowest quantized output value represents."
<a name="line-15859"></a>  name: "min_output"
<a name="line-15860"></a>  type: DT_FLOAT
<a name="line-15861"></a>}
<a name="line-15862"></a>output_arg {
<a name="line-15863"></a>  description: "The float value that the highest quantized output value represents."
<a name="line-15864"></a>  name: "max_output"
<a name="line-15865"></a>  type: DT_FLOAT
<a name="line-15866"></a>}
<a name="line-15867"></a>-}</span>
<a name="line-15868"></a>
<a name="line-15869"></a><a name="adjustContrastv2"></a><span class='hs-comment'>-- | Adjust the contrast of one or more images.</span>
<a name="line-15870"></a><span class='hs-comment'>--</span>
<a name="line-15871"></a><span class='hs-comment'>-- `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are</span>
<a name="line-15872"></a><span class='hs-comment'>-- interpreted as `[height, width, channels]`.  The other dimensions only</span>
<a name="line-15873"></a><span class='hs-comment'>-- represent a collection of images, such as `[batch, height, width, channels].`</span>
<a name="line-15874"></a><span class='hs-comment'>-- </span>
<a name="line-15875"></a><span class='hs-comment'>-- Contrast is adjusted independently for each channel of each image.</span>
<a name="line-15876"></a><span class='hs-comment'>-- </span>
<a name="line-15877"></a><span class='hs-comment'>-- For each channel, the Op first computes the mean of the image pixels in the</span>
<a name="line-15878"></a><span class='hs-comment'>-- channel and then adjusts each component of each pixel to</span>
<a name="line-15879"></a><span class='hs-comment'>-- `(x - mean) * contrast_factor + mean`.</span>
<a name="line-15880"></a><span class='hs-definition'>adjustContrastv2</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __images__: Images to adjust.  At least 3-D.</span>
<a name="line-15881"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __contrast_factor__: A float multiplier for adjusting contrast.</span>
<a name="line-15882"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __output__: The contrast-adjusted image or images.</span>
<a name="line-15883"></a><span class='hs-definition'>adjustContrastv2</span> <span class='hs-varid'>images</span> <span class='hs-varid'>contrast_factor</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15884"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AdjustContrastv2"</span><span class='hs-layout'>)</span>
<a name="line-15885"></a>        <span class='hs-varid'>images</span> <span class='hs-varid'>contrast_factor</span>
<a name="line-15886"></a><span class='hs-comment'>{-
<a name="line-15887"></a>input_arg {
<a name="line-15888"></a>  description: "Images to adjust.  At least 3-D."
<a name="line-15889"></a>  name: "images"
<a name="line-15890"></a>  type: DT_FLOAT
<a name="line-15891"></a>}
<a name="line-15892"></a>input_arg {
<a name="line-15893"></a>  description: "A float multiplier for adjusting contrast."
<a name="line-15894"></a>  name: "contrast_factor"
<a name="line-15895"></a>  type: DT_FLOAT
<a name="line-15896"></a>}
<a name="line-15897"></a>output_arg {
<a name="line-15898"></a>  description: "The contrast-adjusted image or images."
<a name="line-15899"></a>  name: "output"
<a name="line-15900"></a>  type: DT_FLOAT
<a name="line-15901"></a>}
<a name="line-15902"></a>-}</span>
<a name="line-15903"></a>
<a name="line-15904"></a><a name="resourceGather"></a><span class='hs-comment'>-- | Gather slices from the variable pointed to by `resource` according to `indices`.</span>
<a name="line-15905"></a><span class='hs-comment'>--</span>
<a name="line-15906"></a><span class='hs-comment'>-- `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).</span>
<a name="line-15907"></a><span class='hs-comment'>-- Produces an output tensor with shape `indices.shape + params.shape[1:]` where:</span>
<a name="line-15908"></a><span class='hs-comment'>-- </span>
<a name="line-15909"></a><span class='hs-comment'>-- ```python</span>
<a name="line-15910"></a><span class='hs-comment'>--     # Scalar indices</span>
<a name="line-15911"></a><span class='hs-comment'>--     output[:, ..., :] = params[indices, :, ... :]</span>
<a name="line-15912"></a><span class='hs-comment'>-- </span>
<a name="line-15913"></a><span class='hs-comment'>--     # Vector indices</span>
<a name="line-15914"></a><span class='hs-comment'>--     output[i, :, ..., :] = params[indices[i], :, ... :]</span>
<a name="line-15915"></a><span class='hs-comment'>-- </span>
<a name="line-15916"></a><span class='hs-comment'>--     # Higher rank indices</span>
<a name="line-15917"></a><span class='hs-comment'>--     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]</span>
<a name="line-15918"></a><span class='hs-comment'>-- ```</span>
<a name="line-15919"></a><span class='hs-definition'>resourceGather</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-15920"></a>                                              <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-15921"></a>                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-15922"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15923"></a>                  <span class='hs-conid'>ResourceHandle</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __resource__</span>
<a name="line-15924"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__</span>
<a name="line-15925"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-15926"></a><span class='hs-definition'>resourceGather</span> <span class='hs-varid'>resource</span> <span class='hs-varid'>indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15927"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ResourceGather"</span>
<a name="line-15928"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-15929"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15930"></a>        <span class='hs-varid'>resource</span> <span class='hs-varid'>indices</span>
<a name="line-15931"></a><span class='hs-comment'>{-
<a name="line-15932"></a>attr {
<a name="line-15933"></a>  default_value { b: true } name: "validate_indices" type: "bool"
<a name="line-15934"></a>}
<a name="line-15935"></a>attr { name: "dtype" type: "type" }
<a name="line-15936"></a>attr {
<a name="line-15937"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-15938"></a>  name: "Tindices"
<a name="line-15939"></a>  type: "type"
<a name="line-15940"></a>}
<a name="line-15941"></a>input_arg { name: "resource" type: DT_RESOURCE }
<a name="line-15942"></a>input_arg { name: "indices" type_attr: "Tindices" }
<a name="line-15943"></a>output_arg { name: "output" type_attr: "dtype" }
<a name="line-15944"></a>-}</span>
<a name="line-15945"></a>
<a name="line-15946"></a><a name="mergeSummary"></a><span class='hs-comment'>-- | Merges summaries.</span>
<a name="line-15947"></a><span class='hs-comment'>--</span>
<a name="line-15948"></a><span class='hs-comment'>-- This op creates a</span>
<a name="line-15949"></a><span class='hs-comment'>-- [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)</span>
<a name="line-15950"></a><span class='hs-comment'>-- protocol buffer that contains the union of all the values in the input</span>
<a name="line-15951"></a><span class='hs-comment'>-- summaries.</span>
<a name="line-15952"></a><span class='hs-comment'>-- </span>
<a name="line-15953"></a><span class='hs-comment'>-- When the Op is run, it reports an `InvalidArgument` error if multiple values</span>
<a name="line-15954"></a><span class='hs-comment'>-- in the summaries to merge use the same tag.</span>
<a name="line-15955"></a><span class='hs-definition'>mergeSummary</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __inputs__: Can be of any shape.  Each must contain serialized `Summary` protocol</span>
<a name="line-15956"></a>                                                       <span class='hs-comment'>-- buffers.</span>
<a name="line-15957"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.</span>
<a name="line-15958"></a><span class='hs-definition'>mergeSummary</span> <span class='hs-varid'>inputs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"inputs"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15959"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MergeSummary"</span>
<a name="line-15960"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-15961"></a>        <span class='hs-varid'>inputs</span>
<a name="line-15962"></a>  <span class='hs-keyword'>where</span>
<a name="line-15963"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-15964"></a><span class='hs-comment'>{-
<a name="line-15965"></a>attr { has_minimum: true minimum: 1 name: "N" type: "int" }
<a name="line-15966"></a>input_arg {
<a name="line-15967"></a>  description: "Can be of any shape.  Each must contain serialized `Summary` protocol\nbuffers."
<a name="line-15968"></a>  name: "inputs"
<a name="line-15969"></a>  number_attr: "N"
<a name="line-15970"></a>  type: DT_STRING
<a name="line-15971"></a>}
<a name="line-15972"></a>output_arg {
<a name="line-15973"></a>  description: "Scalar. Serialized `Summary` protocol buffer."
<a name="line-15974"></a>  name: "summary"
<a name="line-15975"></a>  type: DT_STRING
<a name="line-15976"></a>}
<a name="line-15977"></a>-}</span>
<a name="line-15978"></a>
<a name="line-15979"></a><span class='hs-comment'>-- | Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.</span>
<a name="line-15980"></a>
<a name="line-15981"></a><a name="serializeSparse"></a><span class='hs-definition'>serializeSparse</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-15982"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sparse_indices__: 2-D.  The `indices` of the `SparseTensor`.</span>
<a name="line-15983"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __sparse_values__: 1-D.  The `values` of the `SparseTensor`.</span>
<a name="line-15984"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sparse_shape__: 1-D.  The `shape` of the `SparseTensor`.</span>
<a name="line-15985"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __serialized_sparse__</span>
<a name="line-15986"></a><span class='hs-definition'>serializeSparse</span> <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>sparse_values</span> <span class='hs-varid'>sparse_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-15987"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SerializeSparse"</span>
<a name="line-15988"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-15989"></a>        <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>sparse_values</span> <span class='hs-varid'>sparse_shape</span>
<a name="line-15990"></a><span class='hs-comment'>{-
<a name="line-15991"></a>attr { name: "T" type: "type" }
<a name="line-15992"></a>input_arg {
<a name="line-15993"></a>  description: "2-D.  The `indices` of the `SparseTensor`."
<a name="line-15994"></a>  name: "sparse_indices"
<a name="line-15995"></a>  type: DT_INT64
<a name="line-15996"></a>}
<a name="line-15997"></a>input_arg {
<a name="line-15998"></a>  description: "1-D.  The `values` of the `SparseTensor`."
<a name="line-15999"></a>  name: "sparse_values"
<a name="line-16000"></a>  type_attr: "T"
<a name="line-16001"></a>}
<a name="line-16002"></a>input_arg {
<a name="line-16003"></a>  description: "1-D.  The `shape` of the `SparseTensor`."
<a name="line-16004"></a>  name: "sparse_shape"
<a name="line-16005"></a>  type: DT_INT64
<a name="line-16006"></a>}
<a name="line-16007"></a>output_arg { name: "serialized_sparse" type: DT_STRING }
<a name="line-16008"></a>-}</span>
<a name="line-16009"></a>
<a name="line-16010"></a><span class='hs-comment'>-- | Training via negative sampling.</span>
<a name="line-16011"></a>
<a name="line-16012"></a><a name="negTrain"></a><span class='hs-definition'>negTrain</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_negative_samples__: Number of negative samples per example.</span>
<a name="line-16013"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __w_in__: input word embedding.</span>
<a name="line-16014"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __w_out__: output word embedding.</span>
<a name="line-16015"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __examples__: A vector of word ids.</span>
<a name="line-16016"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __labels__: A vector of word ids.</span>
<a name="line-16017"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __lr__</span>
<a name="line-16018"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-16019"></a><span class='hs-definition'>negTrain</span> <span class='hs-varid'>num_negative_samples</span> <span class='hs-varid'>w_in</span> <span class='hs-varid'>w_out</span> <span class='hs-varid'>examples</span> <span class='hs-varid'>labels</span> <span class='hs-varid'>lr</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16020"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"NegTrain"</span>
<a name="line-16021"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_negative_samples"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_negative_samples</span><span class='hs-layout'>)</span>
<a name="line-16022"></a>        <span class='hs-varid'>w_in</span> <span class='hs-varid'>w_out</span> <span class='hs-varid'>examples</span> <span class='hs-varid'>labels</span> <span class='hs-varid'>lr</span>
<a name="line-16023"></a><span class='hs-comment'>{-
<a name="line-16024"></a>attr {
<a name="line-16025"></a>  description: "Count of words in the vocabulary."
<a name="line-16026"></a>  name: "vocab_count"
<a name="line-16027"></a>  type: "list(int)"
<a name="line-16028"></a>}
<a name="line-16029"></a>attr {
<a name="line-16030"></a>  description: "Number of negative samples per example."
<a name="line-16031"></a>  name: "num_negative_samples"
<a name="line-16032"></a>  type: "int"
<a name="line-16033"></a>}
<a name="line-16034"></a>input_arg {
<a name="line-16035"></a>  description: "input word embedding."
<a name="line-16036"></a>  is_ref: true
<a name="line-16037"></a>  name: "w_in"
<a name="line-16038"></a>  type: DT_FLOAT
<a name="line-16039"></a>}
<a name="line-16040"></a>input_arg {
<a name="line-16041"></a>  description: "output word embedding."
<a name="line-16042"></a>  is_ref: true
<a name="line-16043"></a>  name: "w_out"
<a name="line-16044"></a>  type: DT_FLOAT
<a name="line-16045"></a>}
<a name="line-16046"></a>input_arg {
<a name="line-16047"></a>  description: "A vector of word ids."
<a name="line-16048"></a>  name: "examples"
<a name="line-16049"></a>  type: DT_INT32
<a name="line-16050"></a>}
<a name="line-16051"></a>input_arg {
<a name="line-16052"></a>  description: "A vector of word ids." name: "labels" type: DT_INT32
<a name="line-16053"></a>}
<a name="line-16054"></a>input_arg { name: "lr" type: DT_FLOAT }
<a name="line-16055"></a>-}</span>
<a name="line-16056"></a>
<a name="line-16057"></a><a name="tensorArrayCloseV2"></a><span class='hs-comment'>-- | Delete the TensorArray from its resource container.  This enables</span>
<a name="line-16058"></a><span class='hs-comment'>--</span>
<a name="line-16059"></a><span class='hs-comment'>-- the user to close and release the resource in the middle of a step/run.</span>
<a name="line-16060"></a><span class='hs-definition'>tensorArrayCloseV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).</span>
<a name="line-16061"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>ControlNode</span>
<a name="line-16062"></a><span class='hs-definition'>tensorArrayCloseV2</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16063"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayCloseV2"</span><span class='hs-layout'>)</span>
<a name="line-16064"></a>        <span class='hs-varid'>handle</span>
<a name="line-16065"></a><span class='hs-comment'>{-
<a name="line-16066"></a>input_arg {
<a name="line-16067"></a>  description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)."
<a name="line-16068"></a>  name: "handle"
<a name="line-16069"></a>  type: DT_STRING
<a name="line-16070"></a>}
<a name="line-16071"></a>-}</span>
<a name="line-16072"></a>
<a name="line-16073"></a><a name="threadUnsafeUnigramCandidateSampler"></a><span class='hs-comment'>-- | Generates labels for candidate sampling with a learned unigram distribution.</span>
<a name="line-16074"></a><span class='hs-comment'>--</span>
<a name="line-16075"></a><span class='hs-comment'>-- See explanations of candidate sampling and the data formats at</span>
<a name="line-16076"></a><span class='hs-comment'>-- go/candidate-sampling.</span>
<a name="line-16077"></a><span class='hs-comment'>-- </span>
<a name="line-16078"></a><span class='hs-comment'>-- For each batch, this op picks a single set of sampled candidate labels.</span>
<a name="line-16079"></a><span class='hs-comment'>-- </span>
<a name="line-16080"></a><span class='hs-comment'>-- The advantages of sampling candidates per-batch are simplicity and the</span>
<a name="line-16081"></a><span class='hs-comment'>-- possibility of efficient dense matrix multiplication. The disadvantage is that</span>
<a name="line-16082"></a><span class='hs-comment'>-- the sampled candidates must be chosen independently of the context and of the</span>
<a name="line-16083"></a><span class='hs-comment'>-- true labels.</span>
<a name="line-16084"></a><span class='hs-definition'>threadUnsafeUnigramCandidateSampler</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_sampled__: Number of candidates to randomly sample per batch.</span>
<a name="line-16085"></a>                                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_true__: Number of true labels per context.</span>
<a name="line-16086"></a>                                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).</span>
<a name="line-16087"></a>                                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __unique__: If unique is true, we sample with rejection, so that all sampled</span>
<a name="line-16088"></a>                                               <span class='hs-comment'>-- candidates in a batch are unique. This requires some approximation to</span>
<a name="line-16089"></a>                                               <span class='hs-comment'>-- estimate the post-rejection sampling probabilities.</span>
<a name="line-16090"></a>                                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the</span>
<a name="line-16091"></a>                                                                   <span class='hs-comment'>-- IDs of the num_true target_classes in the corresponding original label.</span>
<a name="line-16092"></a>                                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-16093"></a>                                           <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-16094"></a>                                           <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-16095"></a>                                       <span class='hs-comment'>-- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)</span>
<a name="line-16096"></a>                                       <span class='hs-comment'>--</span>
<a name="line-16097"></a>                                       <span class='hs-comment'>-- * __sampled_candidates__: A vector of length num_sampled, in which each element is</span>
<a name="line-16098"></a>                                       <span class='hs-comment'>-- the ID of a sampled candidate.</span>
<a name="line-16099"></a>                                       <span class='hs-comment'>--</span>
<a name="line-16100"></a>                                       <span class='hs-comment'>-- * __true_expected_count__: A batch_size * num_true matrix, representing</span>
<a name="line-16101"></a>                                       <span class='hs-comment'>-- the number of times each candidate is expected to occur in a batch</span>
<a name="line-16102"></a>                                       <span class='hs-comment'>-- of sampled candidates. If unique=true, then this is a probability.</span>
<a name="line-16103"></a>                                       <span class='hs-comment'>--</span>
<a name="line-16104"></a>                                       <span class='hs-comment'>-- * __sampled_expected_count__: A vector of length num_sampled, for each sampled</span>
<a name="line-16105"></a>                                       <span class='hs-comment'>-- candidate representing the number of times the candidate is expected</span>
<a name="line-16106"></a>                                       <span class='hs-comment'>-- to occur in a batch of sampled candidates.  If unique=true, then this is a</span>
<a name="line-16107"></a>                                       <span class='hs-comment'>-- probability.</span>
<a name="line-16108"></a><span class='hs-definition'>threadUnsafeUnigramCandidateSampler</span> <span class='hs-varid'>num_sampled</span> <span class='hs-varid'>num_true</span> <span class='hs-varid'>range_max</span> <span class='hs-varid'>unique</span>
<a name="line-16109"></a>                                    <span class='hs-varid'>true_classes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16110"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ThreadUnsafeUnigramCandidateSampler"</span>
<a name="line-16111"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_sampled"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_sampled</span>
<a name="line-16112"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_true"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_true</span>
<a name="line-16113"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"range_max"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>range_max</span>
<a name="line-16114"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"unique"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>unique</span><span class='hs-layout'>)</span>
<a name="line-16115"></a>        <span class='hs-varid'>true_classes</span>
<a name="line-16116"></a><span class='hs-comment'>{-
<a name="line-16117"></a>attr {
<a name="line-16118"></a>  description: "Number of true labels per context."
<a name="line-16119"></a>  has_minimum: true
<a name="line-16120"></a>  minimum: 1
<a name="line-16121"></a>  name: "num_true"
<a name="line-16122"></a>  type: "int"
<a name="line-16123"></a>}
<a name="line-16124"></a>attr {
<a name="line-16125"></a>  description: "Number of candidates to randomly sample per batch."
<a name="line-16126"></a>  has_minimum: true
<a name="line-16127"></a>  minimum: 1
<a name="line-16128"></a>  name: "num_sampled"
<a name="line-16129"></a>  type: "int"
<a name="line-16130"></a>}
<a name="line-16131"></a>attr {
<a name="line-16132"></a>  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
<a name="line-16133"></a>  name: "unique"
<a name="line-16134"></a>  type: "bool"
<a name="line-16135"></a>}
<a name="line-16136"></a>attr {
<a name="line-16137"></a>  description: "The sampler will sample integers from the interval [0, range_max)."
<a name="line-16138"></a>  has_minimum: true
<a name="line-16139"></a>  minimum: 1
<a name="line-16140"></a>  name: "range_max"
<a name="line-16141"></a>  type: "int"
<a name="line-16142"></a>}
<a name="line-16143"></a>attr {
<a name="line-16144"></a>  default_value { i: 0 }
<a name="line-16145"></a>  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-16146"></a>  name: "seed"
<a name="line-16147"></a>  type: "int"
<a name="line-16148"></a>}
<a name="line-16149"></a>attr {
<a name="line-16150"></a>  default_value { i: 0 }
<a name="line-16151"></a>  description: "An second seed to avoid seed collision."
<a name="line-16152"></a>  name: "seed2"
<a name="line-16153"></a>  type: "int"
<a name="line-16154"></a>}
<a name="line-16155"></a>input_arg {
<a name="line-16156"></a>  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
<a name="line-16157"></a>  name: "true_classes"
<a name="line-16158"></a>  type: DT_INT64
<a name="line-16159"></a>}
<a name="line-16160"></a>output_arg {
<a name="line-16161"></a>  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
<a name="line-16162"></a>  name: "sampled_candidates"
<a name="line-16163"></a>  type: DT_INT64
<a name="line-16164"></a>}
<a name="line-16165"></a>output_arg {
<a name="line-16166"></a>  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
<a name="line-16167"></a>  name: "true_expected_count"
<a name="line-16168"></a>  type: DT_FLOAT
<a name="line-16169"></a>}
<a name="line-16170"></a>output_arg {
<a name="line-16171"></a>  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
<a name="line-16172"></a>  name: "sampled_expected_count"
<a name="line-16173"></a>  type: DT_FLOAT
<a name="line-16174"></a>}
<a name="line-16175"></a>-}</span>
<a name="line-16176"></a>
<a name="line-16177"></a><a name="stringToNumber"></a><span class='hs-comment'>-- | Converts each string in the input Tensor to the specified numeric type.</span>
<a name="line-16178"></a><span class='hs-comment'>--</span>
<a name="line-16179"></a><span class='hs-comment'>-- (Note that int32 overflow results in an error while float overflow</span>
<a name="line-16180"></a><span class='hs-comment'>-- results in a rounded value.)</span>
<a name="line-16181"></a><span class='hs-definition'>stringToNumber</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-16182"></a>                                        <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-16183"></a>                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16184"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __string_tensor__</span>
<a name="line-16185"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span> <span class='hs-comment'>-- ^ __output__: A Tensor of the same shape as the input `string_tensor`.</span>
<a name="line-16186"></a><span class='hs-definition'>stringToNumber</span> <span class='hs-varid'>string_tensor</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16187"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StringToNumber"</span>
<a name="line-16188"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16189"></a>        <span class='hs-varid'>string_tensor</span>
<a name="line-16190"></a><span class='hs-comment'>{-
<a name="line-16191"></a>attr {
<a name="line-16192"></a>  allowed_values { list { type: DT_FLOAT type: DT_INT32 } }
<a name="line-16193"></a>  default_value { type: DT_FLOAT }
<a name="line-16194"></a>  description: "The numeric type to interpret each string in `string_tensor` as."
<a name="line-16195"></a>  name: "out_type"
<a name="line-16196"></a>  type: "type"
<a name="line-16197"></a>}
<a name="line-16198"></a>input_arg { name: "string_tensor" type: DT_STRING }
<a name="line-16199"></a>output_arg {
<a name="line-16200"></a>  description: "A Tensor of the same shape as the input `string_tensor`."
<a name="line-16201"></a>  name: "output"
<a name="line-16202"></a>  type_attr: "out_type"
<a name="line-16203"></a>}
<a name="line-16204"></a>-}</span>
<a name="line-16205"></a>
<a name="line-16206"></a><a name="cTCBeamSearchDecoder"></a><span class='hs-comment'>-- | Performs beam search decoding on the logits given in input.</span>
<a name="line-16207"></a><span class='hs-comment'>--</span>
<a name="line-16208"></a><span class='hs-comment'>-- A note about the attribute merge_repeated: For the beam search decoder,</span>
<a name="line-16209"></a><span class='hs-comment'>-- this means that if consecutive entries in a beam are the same, only</span>
<a name="line-16210"></a><span class='hs-comment'>-- the first of these is emitted.  That is, when the top path is "A B B B B",</span>
<a name="line-16211"></a><span class='hs-comment'>-- "A B" is returned if merge_repeated = True but "A B B B B" is</span>
<a name="line-16212"></a><span class='hs-comment'>-- returned if merge_repeated = False.</span>
<a name="line-16213"></a><span class='hs-definition'>cTCBeamSearchDecoder</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __beam_width__: A scalar &gt;= 0 (beam search beam width).</span>
<a name="line-16214"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __top_paths__: A scalar &gt;= 0, &lt;= beam_width (controls output size).</span>
<a name="line-16215"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.</span>
<a name="line-16216"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __sequence_length__: A vector containing sequence lengths, size `(batch)`.</span>
<a name="line-16217"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>,</span>
<a name="line-16218"></a>                            <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>,</span>
<a name="line-16219"></a>                            <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-16220"></a>                        <span class='hs-comment'>-- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__)</span>
<a name="line-16221"></a>                        <span class='hs-comment'>--</span>
<a name="line-16222"></a>                        <span class='hs-comment'>-- * __decoded_indices__: A list (length: top_paths) of indices matrices.  Matrix j,</span>
<a name="line-16223"></a>                        <span class='hs-comment'>-- size `(total_decoded_outputs[j] x 2)`, has indices of a</span>
<a name="line-16224"></a>                        <span class='hs-comment'>-- `SparseTensor&lt;int64, 2&gt;`.  The rows store: [batch, time].</span>
<a name="line-16225"></a>                        <span class='hs-comment'>--</span>
<a name="line-16226"></a>                        <span class='hs-comment'>-- * __decoded_values__: A list (length: top_paths) of values vectors.  Vector j,</span>
<a name="line-16227"></a>                        <span class='hs-comment'>-- size `(length total_decoded_outputs[j])`, has the values of a</span>
<a name="line-16228"></a>                        <span class='hs-comment'>-- `SparseTensor&lt;int64, 2&gt;`.  The vector stores the decoded classes for beam j.</span>
<a name="line-16229"></a>                        <span class='hs-comment'>--</span>
<a name="line-16230"></a>                        <span class='hs-comment'>-- * __decoded_shape__: A list (length: top_paths) of shape vector.  Vector j,</span>
<a name="line-16231"></a>                        <span class='hs-comment'>-- size `(2)`, stores the shape of the decoded `SparseTensor[j]`.</span>
<a name="line-16232"></a>                        <span class='hs-comment'>-- Its values are: `[batch_size, max_decoded_length[j]]`.</span>
<a name="line-16233"></a>                        <span class='hs-comment'>--</span>
<a name="line-16234"></a>                        <span class='hs-comment'>-- * __log_probability__: A matrix, shaped: `(batch_size x top_paths)`.  The</span>
<a name="line-16235"></a>                        <span class='hs-comment'>-- sequence log-probabilities.</span>
<a name="line-16236"></a><span class='hs-definition'>cTCBeamSearchDecoder</span> <span class='hs-varid'>beam_width</span> <span class='hs-varid'>top_paths</span> <span class='hs-varid'>inputs</span>
<a name="line-16237"></a>                     <span class='hs-varid'>sequence_length</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16238"></a>    <span class='hs-varid'>buildListOp</span> <span class='hs-keyglyph'>[</span><span class='hs-varid'>top_paths</span><span class='hs-layout'>,</span> <span class='hs-varid'>top_paths</span><span class='hs-layout'>,</span> <span class='hs-varid'>top_paths</span><span class='hs-keyglyph'>]</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CTCBeamSearchDecoder"</span>
<a name="line-16239"></a>                                                   <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"beam_width"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>beam_width</span>
<a name="line-16240"></a>                                                   <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"top_paths"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>top_paths</span><span class='hs-layout'>)</span>
<a name="line-16241"></a>        <span class='hs-varid'>inputs</span> <span class='hs-varid'>sequence_length</span>
<a name="line-16242"></a><span class='hs-comment'>{-
<a name="line-16243"></a>attr {
<a name="line-16244"></a>  description: "A scalar &gt;= 0 (beam search beam width)."
<a name="line-16245"></a>  has_minimum: true
<a name="line-16246"></a>  minimum: 1
<a name="line-16247"></a>  name: "beam_width"
<a name="line-16248"></a>  type: "int"
<a name="line-16249"></a>}
<a name="line-16250"></a>attr {
<a name="line-16251"></a>  description: "A scalar &gt;= 0, &lt;= beam_width (controls output size)."
<a name="line-16252"></a>  has_minimum: true
<a name="line-16253"></a>  minimum: 1
<a name="line-16254"></a>  name: "top_paths"
<a name="line-16255"></a>  type: "int"
<a name="line-16256"></a>}
<a name="line-16257"></a>attr {
<a name="line-16258"></a>  default_value { b: true }
<a name="line-16259"></a>  description: "If true, merge repeated classes in output."
<a name="line-16260"></a>  name: "merge_repeated"
<a name="line-16261"></a>  type: "bool"
<a name="line-16262"></a>}
<a name="line-16263"></a>input_arg {
<a name="line-16264"></a>  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
<a name="line-16265"></a>  name: "inputs"
<a name="line-16266"></a>  type: DT_FLOAT
<a name="line-16267"></a>}
<a name="line-16268"></a>input_arg {
<a name="line-16269"></a>  description: "A vector containing sequence lengths, size `(batch)`."
<a name="line-16270"></a>  name: "sequence_length"
<a name="line-16271"></a>  type: DT_INT32
<a name="line-16272"></a>}
<a name="line-16273"></a>output_arg {
<a name="line-16274"></a>  description: "A list (length: top_paths) of indices matrices.  Matrix j,\nsize `(total_decoded_outputs[j] x 2)`, has indices of a\n`SparseTensor&lt;int64, 2&gt;`.  The rows store: [batch, time]."
<a name="line-16275"></a>  name: "decoded_indices"
<a name="line-16276"></a>  number_attr: "top_paths"
<a name="line-16277"></a>  type: DT_INT64
<a name="line-16278"></a>}
<a name="line-16279"></a>output_arg {
<a name="line-16280"></a>  description: "A list (length: top_paths) of values vectors.  Vector j,\nsize `(length total_decoded_outputs[j])`, has the values of a\n`SparseTensor&lt;int64, 2&gt;`.  The vector stores the decoded classes for beam j."
<a name="line-16281"></a>  name: "decoded_values"
<a name="line-16282"></a>  number_attr: "top_paths"
<a name="line-16283"></a>  type: DT_INT64
<a name="line-16284"></a>}
<a name="line-16285"></a>output_arg {
<a name="line-16286"></a>  description: "A list (length: top_paths) of shape vector.  Vector j,\nsize `(2)`, stores the shape of the decoded `SparseTensor[j]`.\nIts values are: `[batch_size, max_decoded_length[j]]`."
<a name="line-16287"></a>  name: "decoded_shape"
<a name="line-16288"></a>  number_attr: "top_paths"
<a name="line-16289"></a>  type: DT_INT64
<a name="line-16290"></a>}
<a name="line-16291"></a>output_arg {
<a name="line-16292"></a>  description: "A matrix, shaped: `(batch_size x top_paths)`.  The\nsequence log-probabilities."
<a name="line-16293"></a>  name: "log_probability"
<a name="line-16294"></a>  type: DT_FLOAT
<a name="line-16295"></a>}
<a name="line-16296"></a>-}</span>
<a name="line-16297"></a>
<a name="line-16298"></a><span class='hs-comment'>-- | Transforms a serialized tensorflow.TensorProto proto into a Tensor.</span>
<a name="line-16299"></a>
<a name="line-16300"></a><a name="parseTensor"></a><span class='hs-definition'>parseTensor</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16301"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __serialized__: A scalar string containing a serialized TensorProto proto.</span>
<a name="line-16302"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span> <span class='hs-comment'>-- ^ __output__: A Tensor of type `out_type`.</span>
<a name="line-16303"></a><span class='hs-definition'>parseTensor</span> <span class='hs-varid'>serialized</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16304"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ParseTensor"</span>
<a name="line-16305"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16306"></a>        <span class='hs-varid'>serialized</span>
<a name="line-16307"></a><span class='hs-comment'>{-
<a name="line-16308"></a>attr {
<a name="line-16309"></a>  description: "The type of the serialized tensor.  The provided type must match the\ntype of the serialized tensor and no implicit conversion will take place."
<a name="line-16310"></a>  name: "out_type"
<a name="line-16311"></a>  type: "type"
<a name="line-16312"></a>}
<a name="line-16313"></a>input_arg {
<a name="line-16314"></a>  description: "A scalar string containing a serialized TensorProto proto."
<a name="line-16315"></a>  name: "serialized"
<a name="line-16316"></a>  type: DT_STRING
<a name="line-16317"></a>}
<a name="line-16318"></a>output_arg {
<a name="line-16319"></a>  description: "A Tensor of type `out_type`."
<a name="line-16320"></a>  name: "output"
<a name="line-16321"></a>  type_attr: "out_type"
<a name="line-16322"></a>}
<a name="line-16323"></a>-}</span>
<a name="line-16324"></a>
<a name="line-16325"></a><a name="imageSummary"></a><span class='hs-comment'>-- | Outputs a `Summary` protocol buffer with images.</span>
<a name="line-16326"></a><span class='hs-comment'>--</span>
<a name="line-16327"></a><span class='hs-comment'>-- The summary has up to `max_images` summary values containing images. The</span>
<a name="line-16328"></a><span class='hs-comment'>-- images are built from `tensor` which must be 4-D with shape `[batch_size,</span>
<a name="line-16329"></a><span class='hs-comment'>-- height, width, channels]` and where `channels` can be:</span>
<a name="line-16330"></a><span class='hs-comment'>-- </span>
<a name="line-16331"></a><span class='hs-comment'>-- *  1: `tensor` is interpreted as Grayscale.</span>
<a name="line-16332"></a><span class='hs-comment'>-- *  3: `tensor` is interpreted as RGB.</span>
<a name="line-16333"></a><span class='hs-comment'>-- *  4: `tensor` is interpreted as RGBA.</span>
<a name="line-16334"></a><span class='hs-comment'>-- </span>
<a name="line-16335"></a><span class='hs-comment'>-- The images have the same number of channels as the input tensor. For float</span>
<a name="line-16336"></a><span class='hs-comment'>-- input, the values are normalized one image at a time to fit in the range</span>
<a name="line-16337"></a><span class='hs-comment'>-- `[0, 255]`.  `uint8` values are unchanged.  The op uses two different</span>
<a name="line-16338"></a><span class='hs-comment'>-- normalization algorithms:</span>
<a name="line-16339"></a><span class='hs-comment'>-- </span>
<a name="line-16340"></a><span class='hs-comment'>-- *  If the input values are all positive, they are rescaled so the largest one</span>
<a name="line-16341"></a><span class='hs-comment'>--    is 255.</span>
<a name="line-16342"></a><span class='hs-comment'>-- </span>
<a name="line-16343"></a><span class='hs-comment'>-- *  If any input value is negative, the values are shifted so input value 0.0</span>
<a name="line-16344"></a><span class='hs-comment'>--    is at 127.  They are then rescaled so that either the smallest value is 0,</span>
<a name="line-16345"></a><span class='hs-comment'>--    or the largest one is 255.</span>
<a name="line-16346"></a><span class='hs-comment'>-- </span>
<a name="line-16347"></a><span class='hs-comment'>-- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to</span>
<a name="line-16348"></a><span class='hs-comment'>-- build the `tag` of the summary values:</span>
<a name="line-16349"></a><span class='hs-comment'>-- </span>
<a name="line-16350"></a><span class='hs-comment'>-- *  If `max_images` is 1, the summary value tag is '*tag*/image'.</span>
<a name="line-16351"></a><span class='hs-comment'>-- *  If `max_images` is greater than 1, the summary value tags are</span>
<a name="line-16352"></a><span class='hs-comment'>--    generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.</span>
<a name="line-16353"></a><span class='hs-comment'>-- </span>
<a name="line-16354"></a><span class='hs-comment'>-- The `bad_color` argument is the color to use in the generated images for</span>
<a name="line-16355"></a><span class='hs-comment'>-- non-finite input values.  It is a `unit8` 1-D tensor of length `channels`.</span>
<a name="line-16356"></a><span class='hs-comment'>-- Each element must be in the range `[0, 255]` (It represents the value of a</span>
<a name="line-16357"></a><span class='hs-comment'>-- pixel in the output image).  Non-finite values in the input tensor are</span>
<a name="line-16358"></a><span class='hs-comment'>-- replaced by this tensor in the output image.  The default value is the color</span>
<a name="line-16359"></a><span class='hs-comment'>-- red.</span>
<a name="line-16360"></a><span class='hs-definition'>imageSummary</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-16361"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-16362"></a>                                                        <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16363"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.</span>
<a name="line-16364"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __tensor__: 4-D of shape `[batch_size, height, width, channels]` where</span>
<a name="line-16365"></a>                               <span class='hs-comment'>-- `channels` is 1, 3, or 4.</span>
<a name="line-16366"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.</span>
<a name="line-16367"></a><span class='hs-definition'>imageSummary</span> <span class='hs-varid'>tag</span> <span class='hs-varid'>tensor</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16368"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ImageSummary"</span>
<a name="line-16369"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16370"></a>        <span class='hs-varid'>tag</span> <span class='hs-varid'>tensor</span>
<a name="line-16371"></a><span class='hs-comment'>{-
<a name="line-16372"></a>attr {
<a name="line-16373"></a>  default_value { i: 3 }
<a name="line-16374"></a>  description: "Max number of batch elements to generate images for."
<a name="line-16375"></a>  has_minimum: true
<a name="line-16376"></a>  minimum: 1
<a name="line-16377"></a>  name: "max_images"
<a name="line-16378"></a>  type: "int"
<a name="line-16379"></a>}
<a name="line-16380"></a>attr {
<a name="line-16381"></a>  allowed_values {
<a name="line-16382"></a>    list { type: DT_UINT8 type: DT_FLOAT type: DT_HALF }
<a name="line-16383"></a>  }
<a name="line-16384"></a>  default_value { type: DT_FLOAT }
<a name="line-16385"></a>  name: "T"
<a name="line-16386"></a>  type: "type"
<a name="line-16387"></a>}
<a name="line-16388"></a>attr {
<a name="line-16389"></a>  default_value {
<a name="line-16390"></a>    tensor {
<a name="line-16391"></a>      dtype: DT_UINT8
<a name="line-16392"></a>      int_val: 255
<a name="line-16393"></a>      int_val: 0
<a name="line-16394"></a>      int_val: 0
<a name="line-16395"></a>      int_val: 255
<a name="line-16396"></a>      tensor_shape { dim { size: 4 } }
<a name="line-16397"></a>    }
<a name="line-16398"></a>  }
<a name="line-16399"></a>  description: "Color to use for pixels with non-finite values."
<a name="line-16400"></a>  name: "bad_color"
<a name="line-16401"></a>  type: "tensor"
<a name="line-16402"></a>}
<a name="line-16403"></a>input_arg {
<a name="line-16404"></a>  description: "Scalar. Used to build the `tag` attribute of the summary values."
<a name="line-16405"></a>  name: "tag"
<a name="line-16406"></a>  type: DT_STRING
<a name="line-16407"></a>}
<a name="line-16408"></a>input_arg {
<a name="line-16409"></a>  description: "4-D of shape `[batch_size, height, width, channels]` where\n`channels` is 1, 3, or 4."
<a name="line-16410"></a>  name: "tensor"
<a name="line-16411"></a>  type_attr: "T"
<a name="line-16412"></a>}
<a name="line-16413"></a>output_arg {
<a name="line-16414"></a>  description: "Scalar. Serialized `Summary` protocol buffer."
<a name="line-16415"></a>  name: "summary"
<a name="line-16416"></a>  type: DT_STRING
<a name="line-16417"></a>}
<a name="line-16418"></a>-}</span>
<a name="line-16419"></a>
<a name="line-16420"></a><a name="truncateDiv"></a><span class='hs-comment'>-- | Returns x / y element-wise for integer types.</span>
<a name="line-16421"></a><span class='hs-comment'>--</span>
<a name="line-16422"></a><span class='hs-comment'>-- Truncation designates that negative numbers will round fractional quantities</span>
<a name="line-16423"></a><span class='hs-comment'>-- toward zero. I.e. -7 / 5 = 1. This matches C semantics but it is different</span>
<a name="line-16424"></a><span class='hs-comment'>-- than Python semantics. See `FloorDiv` for a division function that matches</span>
<a name="line-16425"></a><span class='hs-comment'>-- Python Semantics.</span>
<a name="line-16426"></a><span class='hs-comment'>-- </span>
<a name="line-16427"></a><span class='hs-comment'>-- *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting</span>
<a name="line-16428"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-16429"></a><span class='hs-definition'>truncateDiv</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-16430"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-16431"></a>                                         <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-16432"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-16433"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-16434"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-16435"></a>                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16436"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-16437"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-16438"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-16439"></a><span class='hs-definition'>truncateDiv</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16440"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TruncateDiv"</span>
<a name="line-16441"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16442"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-16443"></a><span class='hs-comment'>{-
<a name="line-16444"></a>attr {
<a name="line-16445"></a>  allowed_values {
<a name="line-16446"></a>    list {
<a name="line-16447"></a>      type: DT_HALF
<a name="line-16448"></a>      type: DT_FLOAT
<a name="line-16449"></a>      type: DT_DOUBLE
<a name="line-16450"></a>      type: DT_UINT8
<a name="line-16451"></a>      type: DT_INT8
<a name="line-16452"></a>      type: DT_UINT16
<a name="line-16453"></a>      type: DT_INT16
<a name="line-16454"></a>      type: DT_INT32
<a name="line-16455"></a>      type: DT_INT64
<a name="line-16456"></a>      type: DT_COMPLEX64
<a name="line-16457"></a>      type: DT_COMPLEX128
<a name="line-16458"></a>    }
<a name="line-16459"></a>  }
<a name="line-16460"></a>  name: "T"
<a name="line-16461"></a>  type: "type"
<a name="line-16462"></a>}
<a name="line-16463"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-16464"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-16465"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-16466"></a>-}</span>
<a name="line-16467"></a>
<a name="line-16468"></a><a name="cholesky"></a><span class='hs-comment'>-- | Computes the Cholesky decomposition of one or more square matrices.</span>
<a name="line-16469"></a><span class='hs-comment'>--</span>
<a name="line-16470"></a><span class='hs-comment'>-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions</span>
<a name="line-16471"></a><span class='hs-comment'>-- form square matrices, with the same constraints as the single matrix Cholesky</span>
<a name="line-16472"></a><span class='hs-comment'>-- decomposition above. The output is a tensor of the same shape as the input</span>
<a name="line-16473"></a><span class='hs-comment'>-- containing the Cholesky decompositions for all input submatrices `[..., :, :]`.</span>
<a name="line-16474"></a><span class='hs-definition'>cholesky</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16475"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Shape is `[..., M, M]`.</span>
<a name="line-16476"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Shape is `[..., M, M]`.</span>
<a name="line-16477"></a><span class='hs-definition'>cholesky</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16478"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Cholesky"</span>
<a name="line-16479"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16480"></a>        <span class='hs-varid'>input</span>
<a name="line-16481"></a><span class='hs-comment'>{-
<a name="line-16482"></a>attr {
<a name="line-16483"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-16484"></a>  name: "T"
<a name="line-16485"></a>  type: "type"
<a name="line-16486"></a>}
<a name="line-16487"></a>input_arg {
<a name="line-16488"></a>  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
<a name="line-16489"></a>}
<a name="line-16490"></a>output_arg {
<a name="line-16491"></a>  description: "Shape is `[..., M, M]`."
<a name="line-16492"></a>  name: "output"
<a name="line-16493"></a>  type_attr: "T"
<a name="line-16494"></a>}
<a name="line-16495"></a>-}</span>
<a name="line-16496"></a>
<a name="line-16497"></a><span class='hs-comment'>-- | </span>
<a name="line-16498"></a>
<a name="line-16499"></a><a name="batchMatrixSolveLs"></a><span class='hs-definition'>batchMatrixSolveLs</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-16500"></a>                                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16501"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __matrix__</span>
<a name="line-16502"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rhs__</span>
<a name="line-16503"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Double</span> <span class='hs-comment'>-- ^ __l2_regularizer__</span>
<a name="line-16504"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-16505"></a><span class='hs-definition'>batchMatrixSolveLs</span> <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span> <span class='hs-varid'>l2_regularizer</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16506"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchMatrixSolveLs"</span>
<a name="line-16507"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16508"></a>        <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span> <span class='hs-varid'>l2_regularizer</span>
<a name="line-16509"></a><span class='hs-comment'>{-
<a name="line-16510"></a>attr {
<a name="line-16511"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-16512"></a>  name: "T"
<a name="line-16513"></a>  type: "type"
<a name="line-16514"></a>}
<a name="line-16515"></a>attr { default_value { b: true } name: "fast" type: "bool" }
<a name="line-16516"></a>input_arg { name: "matrix" type_attr: "T" }
<a name="line-16517"></a>input_arg { name: "rhs" type_attr: "T" }
<a name="line-16518"></a>input_arg { name: "l2_regularizer" type: DT_DOUBLE }
<a name="line-16519"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-16520"></a>-}</span>
<a name="line-16521"></a>
<a name="line-16522"></a><span class='hs-comment'>-- | Outputs all keys and values in the table.</span>
<a name="line-16523"></a>
<a name="line-16524"></a><a name="lookupTableExport"></a><span class='hs-definition'>lookupTableExport</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>tkeys</span> <span class='hs-varid'>tvalues</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tkeys</span><span class='hs-layout'>,</span>
<a name="line-16525"></a>                                             <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tvalues</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16526"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __table_handle__: Handle to the table.</span>
<a name="line-16527"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tkeys</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tvalues</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16528"></a>                     <span class='hs-comment'>-- ^ (__keys__, __values__)</span>
<a name="line-16529"></a>                     <span class='hs-comment'>--</span>
<a name="line-16530"></a>                     <span class='hs-comment'>-- * __keys__: Vector of all keys present in the table.</span>
<a name="line-16531"></a>                     <span class='hs-comment'>--</span>
<a name="line-16532"></a>                     <span class='hs-comment'>-- * __values__: Tensor of all values in the table. Indexed in parallel with `keys`.</span>
<a name="line-16533"></a><span class='hs-definition'>lookupTableExport</span> <span class='hs-varid'>table_handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16534"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LookupTableExport"</span>
<a name="line-16535"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tkeys"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tkeys</span><span class='hs-layout'>)</span>
<a name="line-16536"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tvalues"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tvalues</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16537"></a>        <span class='hs-varid'>table_handle</span>
<a name="line-16538"></a><span class='hs-comment'>{-
<a name="line-16539"></a>attr { name: "Tkeys" type: "type" }
<a name="line-16540"></a>attr { name: "Tvalues" type: "type" }
<a name="line-16541"></a>input_arg {
<a name="line-16542"></a>  description: "Handle to the table."
<a name="line-16543"></a>  is_ref: true
<a name="line-16544"></a>  name: "table_handle"
<a name="line-16545"></a>  type: DT_STRING
<a name="line-16546"></a>}
<a name="line-16547"></a>output_arg {
<a name="line-16548"></a>  description: "Vector of all keys present in the table."
<a name="line-16549"></a>  name: "keys"
<a name="line-16550"></a>  type_attr: "Tkeys"
<a name="line-16551"></a>}
<a name="line-16552"></a>output_arg {
<a name="line-16553"></a>  description: "Tensor of all values in the table. Indexed in parallel with `keys`."
<a name="line-16554"></a>  name: "values"
<a name="line-16555"></a>  type_attr: "Tvalues"
<a name="line-16556"></a>}
<a name="line-16557"></a>-}</span>
<a name="line-16558"></a>
<a name="line-16559"></a><span class='hs-comment'>-- | </span>
<a name="line-16560"></a>
<a name="line-16561"></a><a name="batchSvd"></a><span class='hs-definition'>batchSvd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-16562"></a>                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-16563"></a>                                                 <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16564"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-16565"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-16566"></a>            <span class='hs-comment'>-- ^ (__s__, __u__, __v__)</span>
<a name="line-16567"></a>            <span class='hs-comment'>--</span>
<a name="line-16568"></a>            <span class='hs-comment'>-- * __s__</span>
<a name="line-16569"></a>            <span class='hs-comment'>--</span>
<a name="line-16570"></a>            <span class='hs-comment'>-- * __u__</span>
<a name="line-16571"></a>            <span class='hs-comment'>--</span>
<a name="line-16572"></a>            <span class='hs-comment'>-- * __v__</span>
<a name="line-16573"></a><span class='hs-definition'>batchSvd</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16574"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchSvd"</span>
<a name="line-16575"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16576"></a>        <span class='hs-varid'>input</span>
<a name="line-16577"></a><span class='hs-comment'>{-
<a name="line-16578"></a>attr { default_value { b: true } name: "compute_uv" type: "bool" }
<a name="line-16579"></a>attr {
<a name="line-16580"></a>  default_value { b: false } name: "full_matrices" type: "bool"
<a name="line-16581"></a>}
<a name="line-16582"></a>attr {
<a name="line-16583"></a>  allowed_values {
<a name="line-16584"></a>    list {
<a name="line-16585"></a>      type: DT_DOUBLE
<a name="line-16586"></a>      type: DT_FLOAT
<a name="line-16587"></a>      type: DT_COMPLEX64
<a name="line-16588"></a>      type: DT_COMPLEX128
<a name="line-16589"></a>    }
<a name="line-16590"></a>  }
<a name="line-16591"></a>  name: "T"
<a name="line-16592"></a>  type: "type"
<a name="line-16593"></a>}
<a name="line-16594"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-16595"></a>output_arg { name: "s" type_attr: "T" }
<a name="line-16596"></a>output_arg { name: "u" type_attr: "T" }
<a name="line-16597"></a>output_arg { name: "v" type_attr: "T" }
<a name="line-16598"></a>-}</span>
<a name="line-16599"></a>
<a name="line-16600"></a><a name="resizeBicubic"></a><span class='hs-comment'>-- | Resize `images` to `size` using bicubic interpolation.</span>
<a name="line-16601"></a><span class='hs-comment'>--</span>
<a name="line-16602"></a><span class='hs-comment'>-- Input images can be of different types but output images are always float.</span>
<a name="line-16603"></a><span class='hs-definition'>resizeBicubic</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-16604"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-16605"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-16606"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-16607"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-16608"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-16609"></a>                                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16610"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __images__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-16611"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The</span>
<a name="line-16612"></a>                                             <span class='hs-comment'>-- new size for the images.</span>
<a name="line-16613"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __resized_images__: 4-D with shape</span>
<a name="line-16614"></a>                 <span class='hs-comment'>-- `[batch, new_height, new_width, channels]`.</span>
<a name="line-16615"></a><span class='hs-definition'>resizeBicubic</span> <span class='hs-varid'>images</span> <span class='hs-varid'>size</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16616"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ResizeBicubic"</span>
<a name="line-16617"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16618"></a>        <span class='hs-varid'>images</span> <span class='hs-varid'>size</span>
<a name="line-16619"></a><span class='hs-comment'>{-
<a name="line-16620"></a>attr {
<a name="line-16621"></a>  allowed_values {
<a name="line-16622"></a>    list {
<a name="line-16623"></a>      type: DT_UINT8
<a name="line-16624"></a>      type: DT_INT8
<a name="line-16625"></a>      type: DT_INT16
<a name="line-16626"></a>      type: DT_INT32
<a name="line-16627"></a>      type: DT_INT64
<a name="line-16628"></a>      type: DT_HALF
<a name="line-16629"></a>      type: DT_FLOAT
<a name="line-16630"></a>      type: DT_DOUBLE
<a name="line-16631"></a>    }
<a name="line-16632"></a>  }
<a name="line-16633"></a>  name: "T"
<a name="line-16634"></a>  type: "type"
<a name="line-16635"></a>}
<a name="line-16636"></a>attr {
<a name="line-16637"></a>  default_value { b: false }
<a name="line-16638"></a>  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
<a name="line-16639"></a>  name: "align_corners"
<a name="line-16640"></a>  type: "bool"
<a name="line-16641"></a>}
<a name="line-16642"></a>input_arg {
<a name="line-16643"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-16644"></a>  name: "images"
<a name="line-16645"></a>  type_attr: "T"
<a name="line-16646"></a>}
<a name="line-16647"></a>input_arg {
<a name="line-16648"></a>  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
<a name="line-16649"></a>  name: "size"
<a name="line-16650"></a>  type: DT_INT32
<a name="line-16651"></a>}
<a name="line-16652"></a>output_arg {
<a name="line-16653"></a>  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
<a name="line-16654"></a>  name: "resized_images"
<a name="line-16655"></a>  type: DT_FLOAT
<a name="line-16656"></a>}
<a name="line-16657"></a>-}</span>
<a name="line-16658"></a>
<a name="line-16659"></a><a name="hSVToRGB"></a><span class='hs-comment'>-- | Convert one or more images from HSV to RGB.</span>
<a name="line-16660"></a><span class='hs-comment'>--</span>
<a name="line-16661"></a><span class='hs-comment'>-- Outputs a tensor of the same shape as the `images` tensor, containing the RGB</span>
<a name="line-16662"></a><span class='hs-comment'>-- value of the pixels. The output is only well defined if the value in `images`</span>
<a name="line-16663"></a><span class='hs-comment'>-- are in `[0,1]`.</span>
<a name="line-16664"></a><span class='hs-comment'>-- </span>
<a name="line-16665"></a><span class='hs-comment'>-- See `rgb_to_hsv` for a description of the HSV encoding.</span>
<a name="line-16666"></a><span class='hs-definition'>hSVToRGB</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16667"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __images__: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.</span>
<a name="line-16668"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: `images` converted to RGB.</span>
<a name="line-16669"></a><span class='hs-definition'>hSVToRGB</span> <span class='hs-varid'>images</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16670"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"HSVToRGB"</span>
<a name="line-16671"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16672"></a>        <span class='hs-varid'>images</span>
<a name="line-16673"></a><span class='hs-comment'>{-
<a name="line-16674"></a>attr {
<a name="line-16675"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-16676"></a>  default_value { type: DT_FLOAT }
<a name="line-16677"></a>  name: "T"
<a name="line-16678"></a>  type: "type"
<a name="line-16679"></a>}
<a name="line-16680"></a>input_arg {
<a name="line-16681"></a>  description: "1-D or higher rank. HSV data to convert. Last dimension must be size 3."
<a name="line-16682"></a>  name: "images"
<a name="line-16683"></a>  type_attr: "T"
<a name="line-16684"></a>}
<a name="line-16685"></a>output_arg {
<a name="line-16686"></a>  description: "`images` converted to RGB."
<a name="line-16687"></a>  name: "output"
<a name="line-16688"></a>  type_attr: "T"
<a name="line-16689"></a>}
<a name="line-16690"></a>-}</span>
<a name="line-16691"></a>
<a name="line-16692"></a><span class='hs-comment'>-- | Performs 3D average pooling on the input.</span>
<a name="line-16693"></a>
<a name="line-16694"></a><a name="avgPool3D"></a><span class='hs-definition'>avgPool3D</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-16695"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-16696"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-16697"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-16698"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-16699"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-16700"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-16701"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16702"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.</span>
<a name="line-16703"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The average pooled output tensor.</span>
<a name="line-16704"></a><span class='hs-definition'>avgPool3D</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16705"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AvgPool3D"</span>
<a name="line-16706"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16707"></a>        <span class='hs-varid'>input</span>
<a name="line-16708"></a><span class='hs-comment'>{-
<a name="line-16709"></a>attr {
<a name="line-16710"></a>  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
<a name="line-16711"></a>  has_minimum: true
<a name="line-16712"></a>  minimum: 5
<a name="line-16713"></a>  name: "ksize"
<a name="line-16714"></a>  type: "list(int)"
<a name="line-16715"></a>}
<a name="line-16716"></a>attr {
<a name="line-16717"></a>  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
<a name="line-16718"></a>  has_minimum: true
<a name="line-16719"></a>  minimum: 5
<a name="line-16720"></a>  name: "strides"
<a name="line-16721"></a>  type: "list(int)"
<a name="line-16722"></a>}
<a name="line-16723"></a>attr {
<a name="line-16724"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-16725"></a>  description: "The type of padding algorithm to use."
<a name="line-16726"></a>  name: "padding"
<a name="line-16727"></a>  type: "string"
<a name="line-16728"></a>}
<a name="line-16729"></a>attr {
<a name="line-16730"></a>  allowed_values {
<a name="line-16731"></a>    list {
<a name="line-16732"></a>      type: DT_FLOAT
<a name="line-16733"></a>      type: DT_DOUBLE
<a name="line-16734"></a>      type: DT_INT64
<a name="line-16735"></a>      type: DT_INT32
<a name="line-16736"></a>      type: DT_UINT8
<a name="line-16737"></a>      type: DT_UINT16
<a name="line-16738"></a>      type: DT_INT16
<a name="line-16739"></a>      type: DT_INT8
<a name="line-16740"></a>      type: DT_COMPLEX64
<a name="line-16741"></a>      type: DT_COMPLEX128
<a name="line-16742"></a>      type: DT_QINT8
<a name="line-16743"></a>      type: DT_QUINT8
<a name="line-16744"></a>      type: DT_QINT32
<a name="line-16745"></a>      type: DT_HALF
<a name="line-16746"></a>    }
<a name="line-16747"></a>  }
<a name="line-16748"></a>  name: "T"
<a name="line-16749"></a>  type: "type"
<a name="line-16750"></a>}
<a name="line-16751"></a>input_arg {
<a name="line-16752"></a>  description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over."
<a name="line-16753"></a>  name: "input"
<a name="line-16754"></a>  type_attr: "T"
<a name="line-16755"></a>}
<a name="line-16756"></a>output_arg {
<a name="line-16757"></a>  description: "The average pooled output tensor."
<a name="line-16758"></a>  name: "output"
<a name="line-16759"></a>  type_attr: "T"
<a name="line-16760"></a>}
<a name="line-16761"></a>-}</span>
<a name="line-16762"></a>
<a name="line-16763"></a><span class='hs-comment'>-- | Delete the stack from its resource container.</span>
<a name="line-16764"></a>
<a name="line-16765"></a><a name="stackClose"></a><span class='hs-definition'>stackClose</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a stack.</span>
<a name="line-16766"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-16767"></a><span class='hs-definition'>stackClose</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16768"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StackClose"</span><span class='hs-layout'>)</span>
<a name="line-16769"></a>        <span class='hs-varid'>handle</span>
<a name="line-16770"></a><span class='hs-comment'>{-
<a name="line-16771"></a>input_arg {
<a name="line-16772"></a>  description: "The handle to a stack."
<a name="line-16773"></a>  is_ref: true
<a name="line-16774"></a>  name: "handle"
<a name="line-16775"></a>  type: DT_STRING
<a name="line-16776"></a>}
<a name="line-16777"></a>-}</span>
<a name="line-16778"></a>
<a name="line-16779"></a><a name="assignVariableOp"></a><span class='hs-comment'>-- | Assigns a new value to a variable.</span>
<a name="line-16780"></a><span class='hs-comment'>--</span>
<a name="line-16781"></a><span class='hs-comment'>-- Any ReadVariableOp with a control dependency on this op is guaranteed to return</span>
<a name="line-16782"></a><span class='hs-comment'>-- this value or a subsequent newer value of the variable.</span>
<a name="line-16783"></a><span class='hs-definition'>assignVariableOp</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16784"></a>                    <span class='hs-conid'>ResourceHandle</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __resource__: handle to the resource in which to store the variable.</span>
<a name="line-16785"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __value__: the value to set the new tensor to use.</span>
<a name="line-16786"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-16787"></a><span class='hs-definition'>assignVariableOp</span> <span class='hs-varid'>resource</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16788"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AssignVariableOp"</span>
<a name="line-16789"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16790"></a>        <span class='hs-varid'>resource</span> <span class='hs-varid'>value</span>
<a name="line-16791"></a><span class='hs-comment'>{-
<a name="line-16792"></a>attr {
<a name="line-16793"></a>  description: "the dtype of the value." name: "dtype" type: "type"
<a name="line-16794"></a>}
<a name="line-16795"></a>input_arg {
<a name="line-16796"></a>  description: "handle to the resource in which to store the variable."
<a name="line-16797"></a>  name: "resource"
<a name="line-16798"></a>  type: DT_RESOURCE
<a name="line-16799"></a>}
<a name="line-16800"></a>input_arg {
<a name="line-16801"></a>  description: "the value to set the new tensor to use."
<a name="line-16802"></a>  name: "value"
<a name="line-16803"></a>  type_attr: "dtype"
<a name="line-16804"></a>}
<a name="line-16805"></a>-}</span>
<a name="line-16806"></a>
<a name="line-16807"></a><a name="lRN"></a><span class='hs-comment'>-- | Local Response Normalization.</span>
<a name="line-16808"></a><span class='hs-comment'>--</span>
<a name="line-16809"></a><span class='hs-comment'>-- The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last</span>
<a name="line-16810"></a><span class='hs-comment'>-- dimension), and each vector is normalized independently.  Within a given vector,</span>
<a name="line-16811"></a><span class='hs-comment'>-- each component is divided by the weighted, squared sum of inputs within</span>
<a name="line-16812"></a><span class='hs-comment'>-- `depth_radius`.  In detail,</span>
<a name="line-16813"></a><span class='hs-comment'>-- </span>
<a name="line-16814"></a><span class='hs-comment'>--     sqr_sum[a, b, c, d] =</span>
<a name="line-16815"></a><span class='hs-comment'>--         sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)</span>
<a name="line-16816"></a><span class='hs-comment'>--     output = input / (bias + alpha * sqr_sum) ** beta</span>
<a name="line-16817"></a><span class='hs-comment'>-- </span>
<a name="line-16818"></a><span class='hs-comment'>-- For details, see [Krizhevsky et al., ImageNet classification with deep</span>
<a name="line-16819"></a><span class='hs-comment'>-- convolutional neural networks (NIPS 2012)](<a href="http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).">http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).</a></span>
<a name="line-16820"></a><span class='hs-definition'>lRN</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16821"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D.</span>
<a name="line-16822"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-16823"></a><span class='hs-definition'>lRN</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16824"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LRN"</span>
<a name="line-16825"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16826"></a>        <span class='hs-varid'>input</span>
<a name="line-16827"></a><span class='hs-comment'>{-
<a name="line-16828"></a>attr {
<a name="line-16829"></a>  default_value { i: 5 }
<a name="line-16830"></a>  description: "0-D.  Half-width of the 1-D normalization window."
<a name="line-16831"></a>  name: "depth_radius"
<a name="line-16832"></a>  type: "int"
<a name="line-16833"></a>}
<a name="line-16834"></a>attr {
<a name="line-16835"></a>  default_value { f: 1.0 }
<a name="line-16836"></a>  description: "An offset (usually positive to avoid dividing by 0)."
<a name="line-16837"></a>  name: "bias"
<a name="line-16838"></a>  type: "float"
<a name="line-16839"></a>}
<a name="line-16840"></a>attr {
<a name="line-16841"></a>  default_value { f: 1.0 }
<a name="line-16842"></a>  description: "A scale factor, usually positive."
<a name="line-16843"></a>  name: "alpha"
<a name="line-16844"></a>  type: "float"
<a name="line-16845"></a>}
<a name="line-16846"></a>attr {
<a name="line-16847"></a>  default_value { f: 0.5 }
<a name="line-16848"></a>  description: "An exponent."
<a name="line-16849"></a>  name: "beta"
<a name="line-16850"></a>  type: "float"
<a name="line-16851"></a>}
<a name="line-16852"></a>attr {
<a name="line-16853"></a>  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
<a name="line-16854"></a>  default_value { type: DT_FLOAT }
<a name="line-16855"></a>  name: "T"
<a name="line-16856"></a>  type: "type"
<a name="line-16857"></a>}
<a name="line-16858"></a>input_arg { description: "4-D." name: "input" type_attr: "T" }
<a name="line-16859"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-16860"></a>-}</span>
<a name="line-16861"></a>
<a name="line-16862"></a><a name="zeta"></a><span class='hs-comment'>-- | Compute the Hurwitz zeta function \\(\zeta(x, q)\\).</span>
<a name="line-16863"></a><span class='hs-comment'>--</span>
<a name="line-16864"></a><span class='hs-comment'>-- The Hurwitz zeta function is defined as:</span>
<a name="line-16865"></a><span class='hs-comment'>-- </span>
<a name="line-16866"></a><span class='hs-comment'>-- ```</span>
<a name="line-16867"></a><span class='hs-comment'>-- \zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}</span>
<a name="line-16868"></a><span class='hs-comment'>-- ```</span>
<a name="line-16869"></a><span class='hs-definition'>zeta</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16870"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-16871"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __q__</span>
<a name="line-16872"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-16873"></a><span class='hs-definition'>zeta</span> <span class='hs-varid'>x</span> <span class='hs-varid'>q</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16874"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Zeta"</span>
<a name="line-16875"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16876"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>q</span>
<a name="line-16877"></a><span class='hs-comment'>{-
<a name="line-16878"></a>attr {
<a name="line-16879"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-16880"></a>  name: "T"
<a name="line-16881"></a>  type: "type"
<a name="line-16882"></a>}
<a name="line-16883"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-16884"></a>input_arg { name: "q" type_attr: "T" }
<a name="line-16885"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-16886"></a>-}</span>
<a name="line-16887"></a>
<a name="line-16888"></a><a name="tensorArrayGradV2"></a><span class='hs-comment'>-- | Creates a TensorArray for storing the gradients of values in the given handle.</span>
<a name="line-16889"></a><span class='hs-comment'>--</span>
<a name="line-16890"></a><span class='hs-comment'>-- If the given TensorArray gradient already exists, returns a reference to it.</span>
<a name="line-16891"></a><span class='hs-comment'>-- </span>
<a name="line-16892"></a><span class='hs-comment'>-- Locks the size of the original TensorArray by disabling its dynamic size flag.</span>
<a name="line-16893"></a><span class='hs-comment'>-- </span>
<a name="line-16894"></a><span class='hs-comment'>-- **A note about the input flow_in:**</span>
<a name="line-16895"></a><span class='hs-comment'>-- </span>
<a name="line-16896"></a><span class='hs-comment'>-- The handle flow_in forces the execution of the gradient lookup to occur</span>
<a name="line-16897"></a><span class='hs-comment'>-- only after certain other operations have occurred.  For example, when</span>
<a name="line-16898"></a><span class='hs-comment'>-- the forward TensorArray is dynamically sized, writes to this TensorArray</span>
<a name="line-16899"></a><span class='hs-comment'>-- may resize the object.  The gradient TensorArray is statically sized based</span>
<a name="line-16900"></a><span class='hs-comment'>-- on the size of the forward TensorArray when this operation executes.</span>
<a name="line-16901"></a><span class='hs-comment'>-- Furthermore, the size of the forward TensorArray is frozen by this call.</span>
<a name="line-16902"></a><span class='hs-comment'>-- As a result, the flow is used to ensure that the call to generate the gradient</span>
<a name="line-16903"></a><span class='hs-comment'>-- TensorArray only happens after all writes are executed.</span>
<a name="line-16904"></a><span class='hs-comment'>-- </span>
<a name="line-16905"></a><span class='hs-comment'>-- In the case of dynamically sized TensorArrays, gradient computation should</span>
<a name="line-16906"></a><span class='hs-comment'>-- only be performed on read operations that have themselves been chained via</span>
<a name="line-16907"></a><span class='hs-comment'>-- flow to occur only after all writes have executed. That way the final size</span>
<a name="line-16908"></a><span class='hs-comment'>-- of the forward TensorArray is known when this operation is called.</span>
<a name="line-16909"></a><span class='hs-comment'>-- </span>
<a name="line-16910"></a><span class='hs-comment'>-- **A note about the source attribute:**</span>
<a name="line-16911"></a><span class='hs-comment'>-- </span>
<a name="line-16912"></a><span class='hs-comment'>-- TensorArray gradient calls use an accumulator TensorArray object.  If</span>
<a name="line-16913"></a><span class='hs-comment'>-- multiple gradients are calculated and run in the same session, the multiple</span>
<a name="line-16914"></a><span class='hs-comment'>-- gradient nodes may accidentally flow throuth the same accumulator TensorArray.</span>
<a name="line-16915"></a><span class='hs-comment'>-- This double counts and generally breaks the TensorArray gradient flow.</span>
<a name="line-16916"></a><span class='hs-comment'>-- </span>
<a name="line-16917"></a><span class='hs-comment'>-- The solution is to identify which gradient call this particular</span>
<a name="line-16918"></a><span class='hs-comment'>-- TensorArray gradient is being called in.  This is performed by identifying</span>
<a name="line-16919"></a><span class='hs-comment'>-- a unique string (e.g. "gradients", "gradients_1", ...) from the input</span>
<a name="line-16920"></a><span class='hs-comment'>-- gradient Tensor's name.  This string is used as a suffix when creating</span>
<a name="line-16921"></a><span class='hs-comment'>-- the TensorArray gradient object here (the attribute `source`).</span>
<a name="line-16922"></a><span class='hs-comment'>-- </span>
<a name="line-16923"></a><span class='hs-comment'>-- The attribute `source` is added as a suffix to the forward TensorArray's</span>
<a name="line-16924"></a><span class='hs-comment'>-- name when performing the creation / lookup, so that each separate gradient</span>
<a name="line-16925"></a><span class='hs-comment'>-- calculation gets its own TensorArray accumulator.</span>
<a name="line-16926"></a><span class='hs-definition'>tensorArrayGradV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to the forward TensorArray.</span>
<a name="line-16927"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-16928"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __grad_handle__</span>
<a name="line-16929"></a><span class='hs-definition'>tensorArrayGradV2</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16930"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayGradV2"</span><span class='hs-layout'>)</span>
<a name="line-16931"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span>
<a name="line-16932"></a><span class='hs-comment'>{-
<a name="line-16933"></a>attr {
<a name="line-16934"></a>  description: "The gradient source string, used to decide which gradient TensorArray\nto return."
<a name="line-16935"></a>  name: "source"
<a name="line-16936"></a>  type: "string"
<a name="line-16937"></a>}
<a name="line-16938"></a>input_arg {
<a name="line-16939"></a>  description: "The handle to the forward TensorArray."
<a name="line-16940"></a>  name: "handle"
<a name="line-16941"></a>  type: DT_STRING
<a name="line-16942"></a>}
<a name="line-16943"></a>input_arg {
<a name="line-16944"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-16945"></a>  name: "flow_in"
<a name="line-16946"></a>  type: DT_FLOAT
<a name="line-16947"></a>}
<a name="line-16948"></a>output_arg { name: "grad_handle" type: DT_STRING }
<a name="line-16949"></a>-}</span>
<a name="line-16950"></a>
<a name="line-16951"></a><span class='hs-comment'>-- | Cast x of type SrcT to y of DstT.</span>
<a name="line-16952"></a>
<a name="line-16953"></a><a name="cast"></a><span class='hs-definition'>cast</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>srcT</span> <span class='hs-varid'>dstT</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>srcT</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>dstT</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16954"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>srcT</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-16955"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dstT</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-16956"></a><span class='hs-definition'>cast</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16957"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Cast"</span>
<a name="line-16958"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"SrcT"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>srcT</span><span class='hs-layout'>)</span>
<a name="line-16959"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"DstT"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dstT</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16960"></a>        <span class='hs-varid'>x</span>
<a name="line-16961"></a><span class='hs-comment'>{-
<a name="line-16962"></a>attr { name: "SrcT" type: "type" }
<a name="line-16963"></a>attr { name: "DstT" type: "type" }
<a name="line-16964"></a>input_arg { name: "x" type_attr: "SrcT" }
<a name="line-16965"></a>output_arg { name: "y" type_attr: "DstT" }
<a name="line-16966"></a>-}</span>
<a name="line-16967"></a>
<a name="line-16968"></a><span class='hs-comment'>-- | Computes the Gauss error function of `x` element-wise.</span>
<a name="line-16969"></a>
<a name="line-16970"></a><a name="erf"></a><span class='hs-definition'>erf</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-16971"></a>                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-16972"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-16973"></a><span class='hs-definition'>erf</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16974"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Erf"</span>
<a name="line-16975"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16976"></a>        <span class='hs-varid'>x</span>
<a name="line-16977"></a><span class='hs-comment'>{-
<a name="line-16978"></a>attr {
<a name="line-16979"></a>  allowed_values {
<a name="line-16980"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-16981"></a>  }
<a name="line-16982"></a>  name: "T"
<a name="line-16983"></a>  type: "type"
<a name="line-16984"></a>}
<a name="line-16985"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-16986"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-16987"></a>-}</span>
<a name="line-16988"></a>
<a name="line-16989"></a><span class='hs-comment'>-- | </span>
<a name="line-16990"></a>
<a name="line-16991"></a><a name="batchMatrixTriangularSolve"></a><span class='hs-definition'>batchMatrixTriangularSolve</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-16992"></a>                                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-16993"></a>                              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __matrix__</span>
<a name="line-16994"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rhs__</span>
<a name="line-16995"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-16996"></a><span class='hs-definition'>batchMatrixTriangularSolve</span> <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-16997"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchMatrixTriangularSolve"</span>
<a name="line-16998"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-16999"></a>        <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span>
<a name="line-17000"></a><span class='hs-comment'>{-
<a name="line-17001"></a>attr { default_value { b: true } name: "lower" type: "bool" }
<a name="line-17002"></a>attr { default_value { b: false } name: "adjoint" type: "bool" }
<a name="line-17003"></a>attr {
<a name="line-17004"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-17005"></a>  name: "T"
<a name="line-17006"></a>  type: "type"
<a name="line-17007"></a>}
<a name="line-17008"></a>input_arg { name: "matrix" type_attr: "T" }
<a name="line-17009"></a>input_arg { name: "rhs" type_attr: "T" }
<a name="line-17010"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-17011"></a>-}</span>
<a name="line-17012"></a>
<a name="line-17013"></a><a name="resourceScatterAdd"></a><span class='hs-comment'>-- | Adds sparse updates to the variable referenced by `resource`.</span>
<a name="line-17014"></a><span class='hs-comment'>--</span>
<a name="line-17015"></a><span class='hs-comment'>-- This operation computes</span>
<a name="line-17016"></a><span class='hs-comment'>-- </span>
<a name="line-17017"></a><span class='hs-comment'>--     # Scalar indices</span>
<a name="line-17018"></a><span class='hs-comment'>--     ref[indices, ...] += updates[...]</span>
<a name="line-17019"></a><span class='hs-comment'>-- </span>
<a name="line-17020"></a><span class='hs-comment'>--     # Vector indices (for each i)</span>
<a name="line-17021"></a><span class='hs-comment'>--     ref[indices[i], ...] += updates[i, ...]</span>
<a name="line-17022"></a><span class='hs-comment'>-- </span>
<a name="line-17023"></a><span class='hs-comment'>--     # High rank indices (for each i, ..., j)</span>
<a name="line-17024"></a><span class='hs-comment'>--     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]</span>
<a name="line-17025"></a><span class='hs-comment'>-- </span>
<a name="line-17026"></a><span class='hs-comment'>-- Duplicate entries are handled correctly: if multiple `indices` reference</span>
<a name="line-17027"></a><span class='hs-comment'>-- the same location, their contributions add.</span>
<a name="line-17028"></a><span class='hs-comment'>-- </span>
<a name="line-17029"></a><span class='hs-comment'>-- Requires `updates.shape = indices.shape + ref.shape[1:]`.</span>
<a name="line-17030"></a><span class='hs-comment'>-- </span>
<a name="line-17031"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-17032"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/ScatterAdd.png" alt&gt;</span>
<a name="line-17033"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-17034"></a><span class='hs-definition'>resourceScatterAdd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>dtype</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-17035"></a>                                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-17036"></a>                                                             <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-17037"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-17038"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-17039"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-17040"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-17041"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-17042"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-17043"></a>                                                             <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-17044"></a>                                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-17045"></a>                                                     <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-17046"></a>                                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-17047"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17048"></a>                      <span class='hs-conid'>ResourceHandle</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __resource__: Should be from a `Variable` node.</span>
<a name="line-17049"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A tensor of indices into the first dimension of `ref`.</span>
<a name="line-17050"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __updates__: A tensor of updated values to add to `ref`.</span>
<a name="line-17051"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-17052"></a><span class='hs-definition'>resourceScatterAdd</span> <span class='hs-varid'>resource</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17053"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ResourceScatterAdd"</span>
<a name="line-17054"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-17055"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17056"></a>        <span class='hs-varid'>resource</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span>
<a name="line-17057"></a><span class='hs-comment'>{-
<a name="line-17058"></a>attr {
<a name="line-17059"></a>  allowed_values {
<a name="line-17060"></a>    list {
<a name="line-17061"></a>      type: DT_FLOAT
<a name="line-17062"></a>      type: DT_DOUBLE
<a name="line-17063"></a>      type: DT_INT64
<a name="line-17064"></a>      type: DT_INT32
<a name="line-17065"></a>      type: DT_UINT8
<a name="line-17066"></a>      type: DT_UINT16
<a name="line-17067"></a>      type: DT_INT16
<a name="line-17068"></a>      type: DT_INT8
<a name="line-17069"></a>      type: DT_COMPLEX64
<a name="line-17070"></a>      type: DT_COMPLEX128
<a name="line-17071"></a>      type: DT_QINT8
<a name="line-17072"></a>      type: DT_QUINT8
<a name="line-17073"></a>      type: DT_QINT32
<a name="line-17074"></a>      type: DT_HALF
<a name="line-17075"></a>    }
<a name="line-17076"></a>  }
<a name="line-17077"></a>  name: "dtype"
<a name="line-17078"></a>  type: "type"
<a name="line-17079"></a>}
<a name="line-17080"></a>attr {
<a name="line-17081"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-17082"></a>  name: "Tindices"
<a name="line-17083"></a>  type: "type"
<a name="line-17084"></a>}
<a name="line-17085"></a>input_arg {
<a name="line-17086"></a>  description: "Should be from a `Variable` node."
<a name="line-17087"></a>  name: "resource"
<a name="line-17088"></a>  type: DT_RESOURCE
<a name="line-17089"></a>}
<a name="line-17090"></a>input_arg {
<a name="line-17091"></a>  description: "A tensor of indices into the first dimension of `ref`."
<a name="line-17092"></a>  name: "indices"
<a name="line-17093"></a>  type_attr: "Tindices"
<a name="line-17094"></a>}
<a name="line-17095"></a>input_arg {
<a name="line-17096"></a>  description: "A tensor of updated values to add to `ref`."
<a name="line-17097"></a>  name: "updates"
<a name="line-17098"></a>  type_attr: "dtype"
<a name="line-17099"></a>}
<a name="line-17100"></a>-}</span>
<a name="line-17101"></a>
<a name="line-17102"></a><span class='hs-comment'>-- | </span>
<a name="line-17103"></a>
<a name="line-17104"></a><a name="batchCholeskyGrad"></a><span class='hs-definition'>batchCholeskyGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-17105"></a>                                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17106"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l__</span>
<a name="line-17107"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__</span>
<a name="line-17108"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-17109"></a><span class='hs-definition'>batchCholeskyGrad</span> <span class='hs-varid'>l</span> <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17110"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchCholeskyGrad"</span>
<a name="line-17111"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17112"></a>        <span class='hs-varid'>l</span> <span class='hs-varid'>grad</span>
<a name="line-17113"></a><span class='hs-comment'>{-
<a name="line-17114"></a>attr {
<a name="line-17115"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-17116"></a>  name: "T"
<a name="line-17117"></a>  type: "type"
<a name="line-17118"></a>}
<a name="line-17119"></a>input_arg { name: "l" type_attr: "T" }
<a name="line-17120"></a>input_arg { name: "grad" type_attr: "T" }
<a name="line-17121"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-17122"></a>-}</span>
<a name="line-17123"></a>
<a name="line-17124"></a><span class='hs-comment'>-- | </span>
<a name="line-17125"></a>
<a name="line-17126"></a><a name="batchMatrixInverse"></a><span class='hs-definition'>batchMatrixInverse</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17127"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-17128"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-17129"></a><span class='hs-definition'>batchMatrixInverse</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17130"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchMatrixInverse"</span>
<a name="line-17131"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17132"></a>        <span class='hs-varid'>input</span>
<a name="line-17133"></a><span class='hs-comment'>{-
<a name="line-17134"></a>attr { default_value { b: false } name: "adjoint" type: "bool" }
<a name="line-17135"></a>attr {
<a name="line-17136"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-17137"></a>  name: "T"
<a name="line-17138"></a>  type: "type"
<a name="line-17139"></a>}
<a name="line-17140"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-17141"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-17142"></a>-}</span>
<a name="line-17143"></a>
<a name="line-17144"></a><span class='hs-comment'>-- | Return the same ref tensor as the input ref tensor.</span>
<a name="line-17145"></a>
<a name="line-17146"></a><a name="refIdentity"></a><span class='hs-definition'>refIdentity</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-17147"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-17148"></a><span class='hs-definition'>refIdentity</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17149"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RefIdentity"</span>
<a name="line-17150"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17151"></a>        <span class='hs-varid'>input</span>
<a name="line-17152"></a><span class='hs-comment'>{-
<a name="line-17153"></a>attr { name: "T" type: "type" }
<a name="line-17154"></a>input_arg { is_ref: true name: "input" type_attr: "T" }
<a name="line-17155"></a>output_arg { is_ref: true name: "output" type_attr: "T" }
<a name="line-17156"></a>-}</span>
<a name="line-17157"></a>
<a name="line-17158"></a><a name="svd"></a><span class='hs-comment'>-- | Computes the singular value decompositions of one or more matrices.</span>
<a name="line-17159"></a><span class='hs-comment'>--</span>
<a name="line-17160"></a><span class='hs-comment'>-- Computes the SVD of each inner matrix in `input` such that</span>
<a name="line-17161"></a><span class='hs-comment'>-- `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`</span>
<a name="line-17162"></a><span class='hs-comment'>-- </span>
<a name="line-17163"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-17164"></a><span class='hs-comment'>-- # a is a tensor containing a batch of matrices.</span>
<a name="line-17165"></a><span class='hs-comment'>-- # s is a tensor of singular values for each matrix.</span>
<a name="line-17166"></a><span class='hs-comment'>-- # u is the tensor containing of left singular vectors for each matrix.</span>
<a name="line-17167"></a><span class='hs-comment'>-- # v is the tensor containing of right singular vectors for each matrix.</span>
<a name="line-17168"></a><span class='hs-comment'>-- s, u, v = svd(a)</span>
<a name="line-17169"></a><span class='hs-comment'>-- s, _, _ = svd(a, compute_uv=False)</span>
<a name="line-17170"></a><span class='hs-comment'>-- ```</span>
<a name="line-17171"></a><span class='hs-definition'>svd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-17172"></a>                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-17173"></a>                                            <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17174"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions</span>
<a name="line-17175"></a>                   <span class='hs-comment'>-- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.</span>
<a name="line-17176"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-17177"></a>       <span class='hs-comment'>-- ^ (__s__, __u__, __v__)</span>
<a name="line-17178"></a>       <span class='hs-comment'>--</span>
<a name="line-17179"></a>       <span class='hs-comment'>-- * __s__: Singular values. Shape is `[..., P]`.</span>
<a name="line-17180"></a>       <span class='hs-comment'>--</span>
<a name="line-17181"></a>       <span class='hs-comment'>-- * __u__: Left singular vectors. If `full_matrices` is `False` then shape is</span>
<a name="line-17182"></a>       <span class='hs-comment'>-- `[..., M, M]`; if `full_matrices` is `True` then shape is</span>
<a name="line-17183"></a>       <span class='hs-comment'>-- `[..., M, P]`. Undefined if `compute_uv` is `False`.</span>
<a name="line-17184"></a>       <span class='hs-comment'>--</span>
<a name="line-17185"></a>       <span class='hs-comment'>-- * __v__: Left singular vectors. If `full_matrices` is `False` then shape is</span>
<a name="line-17186"></a>       <span class='hs-comment'>-- `[..., N, N]`. If `full_matrices` is `True` then shape is `[..., N, P]`.</span>
<a name="line-17187"></a>       <span class='hs-comment'>-- Undefined if `compute_uv` is false.</span>
<a name="line-17188"></a><span class='hs-definition'>svd</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17189"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Svd"</span>
<a name="line-17190"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17191"></a>        <span class='hs-varid'>input</span>
<a name="line-17192"></a><span class='hs-comment'>{-
<a name="line-17193"></a>attr {
<a name="line-17194"></a>  default_value { b: true }
<a name="line-17195"></a>  description: "If true, left and right singular vectors will be\ncomputed and returned in `u` and `v`, respectively.\nIf false, `u` and `v` are not set and should never referenced."
<a name="line-17196"></a>  name: "compute_uv"
<a name="line-17197"></a>  type: "bool"
<a name="line-17198"></a>}
<a name="line-17199"></a>attr {
<a name="line-17200"></a>  default_value { b: false }
<a name="line-17201"></a>  description: "If true, compute full-sized `u` and `v`. If false\n(the default), compute only the leading `P` singular vectors.\nIgnored if `compute_uv` is `False`."
<a name="line-17202"></a>  name: "full_matrices"
<a name="line-17203"></a>  type: "bool"
<a name="line-17204"></a>}
<a name="line-17205"></a>attr {
<a name="line-17206"></a>  allowed_values {
<a name="line-17207"></a>    list {
<a name="line-17208"></a>      type: DT_DOUBLE
<a name="line-17209"></a>      type: DT_FLOAT
<a name="line-17210"></a>      type: DT_COMPLEX64
<a name="line-17211"></a>      type: DT_COMPLEX128
<a name="line-17212"></a>    }
<a name="line-17213"></a>  }
<a name="line-17214"></a>  name: "T"
<a name="line-17215"></a>  type: "type"
<a name="line-17216"></a>}
<a name="line-17217"></a>input_arg {
<a name="line-17218"></a>  description: "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`."
<a name="line-17219"></a>  name: "input"
<a name="line-17220"></a>  type_attr: "T"
<a name="line-17221"></a>}
<a name="line-17222"></a>output_arg {
<a name="line-17223"></a>  description: "Singular values. Shape is `[..., P]`."
<a name="line-17224"></a>  name: "s"
<a name="line-17225"></a>  type_attr: "T"
<a name="line-17226"></a>}
<a name="line-17227"></a>output_arg {
<a name="line-17228"></a>  description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., M, M]`; if `full_matrices` is `True` then shape is\n`[..., M, P]`. Undefined if `compute_uv` is `False`."
<a name="line-17229"></a>  name: "u"
<a name="line-17230"></a>  type_attr: "T"
<a name="line-17231"></a>}
<a name="line-17232"></a>output_arg {
<a name="line-17233"></a>  description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., N, N]`. If `full_matrices` is `True` then shape is `[..., N, P]`.\nUndefined if `compute_uv` is false."
<a name="line-17234"></a>  name: "v"
<a name="line-17235"></a>  type_attr: "T"
<a name="line-17236"></a>}
<a name="line-17237"></a>-}</span>
<a name="line-17238"></a>
<a name="line-17239"></a><a name="matrixSolveLs"></a><span class='hs-comment'>-- | Solves one or more linear least-squares problems.</span>
<a name="line-17240"></a><span class='hs-comment'>--</span>
<a name="line-17241"></a><span class='hs-comment'>-- `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions</span>
<a name="line-17242"></a><span class='hs-comment'>-- form matrices of size `[M, N]`. Rhs is a tensor of shape `[..., M, K]`.</span>
<a name="line-17243"></a><span class='hs-comment'>-- The output is a tensor shape `[..., N, K]` where each output matrix solves</span>
<a name="line-17244"></a><span class='hs-comment'>-- each of the equations matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]</span>
<a name="line-17245"></a><span class='hs-comment'>-- in the least squares sense.</span>
<a name="line-17246"></a><span class='hs-comment'>-- </span>
<a name="line-17247"></a><span class='hs-comment'>-- matrix and right-hand sides in the batch:</span>
<a name="line-17248"></a><span class='hs-comment'>-- </span>
<a name="line-17249"></a><span class='hs-comment'>-- `matrix`=\\(A \in \Re^{m \times n}\\),</span>
<a name="line-17250"></a><span class='hs-comment'>-- `rhs`=\\(B  \in \Re^{m \times k}\\),</span>
<a name="line-17251"></a><span class='hs-comment'>-- `output`=\\(X  \in \Re^{n \times k}\\),</span>
<a name="line-17252"></a><span class='hs-comment'>-- `l2_regularizer`=\\(\lambda\\).</span>
<a name="line-17253"></a><span class='hs-comment'>-- </span>
<a name="line-17254"></a><span class='hs-comment'>-- If `fast` is `True`, then the solution is computed by solving the normal</span>
<a name="line-17255"></a><span class='hs-comment'>-- equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then</span>
<a name="line-17256"></a><span class='hs-comment'>-- \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares</span>
<a name="line-17257"></a><span class='hs-comment'>-- problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 +</span>
<a name="line-17258"></a><span class='hs-comment'>-- \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as</span>
<a name="line-17259"></a><span class='hs-comment'>-- \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the</span>
<a name="line-17260"></a><span class='hs-comment'>-- minimum-norm solution to the under-determined linear system, i.e.</span>
<a name="line-17261"></a><span class='hs-comment'>-- \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to</span>
<a name="line-17262"></a><span class='hs-comment'>-- \\(A Z = B\\). Notice that the fast path is only numerically stable when</span>
<a name="line-17263"></a><span class='hs-comment'>-- \\(A\\) is numerically full rank and has a condition number</span>
<a name="line-17264"></a><span class='hs-comment'>-- \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or\\(\lambda\\) is</span>
<a name="line-17265"></a><span class='hs-comment'>-- sufficiently large.</span>
<a name="line-17266"></a><span class='hs-comment'>-- </span>
<a name="line-17267"></a><span class='hs-comment'>-- If `fast` is `False` an algorithm based on the numerically robust complete</span>
<a name="line-17268"></a><span class='hs-comment'>-- orthogonal decomposition is used. This computes the minimum-norm</span>
<a name="line-17269"></a><span class='hs-comment'>-- least-squares solution, even when \\(A\\) is rank deficient. This path is</span>
<a name="line-17270"></a><span class='hs-comment'>-- typically 6-7 times slower than the fast path. If `fast` is `False` then</span>
<a name="line-17271"></a><span class='hs-comment'>-- `l2_regularizer` is ignored.</span>
<a name="line-17272"></a><span class='hs-definition'>matrixSolveLs</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17273"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __matrix__: Shape is `[..., M, N]`.</span>
<a name="line-17274"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rhs__: Shape is `[..., M, K]`.</span>
<a name="line-17275"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Double</span> <span class='hs-comment'>-- ^ __l2_regularizer__: Scalar tensor.</span>
<a name="line-17276"></a>                                     <span class='hs-comment'>-- </span>
<a name="line-17277"></a>                                     <span class='hs-comment'>-- @compatibility(numpy)</span>
<a name="line-17278"></a>                                     <span class='hs-comment'>-- Equivalent to np.linalg.lstsq</span>
<a name="line-17279"></a>                                     <span class='hs-comment'>-- @end_compatibility</span>
<a name="line-17280"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Shape is `[..., N, K]`.</span>
<a name="line-17281"></a><span class='hs-definition'>matrixSolveLs</span> <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span> <span class='hs-varid'>l2_regularizer</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17282"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatrixSolveLs"</span>
<a name="line-17283"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17284"></a>        <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span> <span class='hs-varid'>l2_regularizer</span>
<a name="line-17285"></a><span class='hs-comment'>{-
<a name="line-17286"></a>attr {
<a name="line-17287"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-17288"></a>  name: "T"
<a name="line-17289"></a>  type: "type"
<a name="line-17290"></a>}
<a name="line-17291"></a>attr { default_value { b: true } name: "fast" type: "bool" }
<a name="line-17292"></a>input_arg {
<a name="line-17293"></a>  description: "Shape is `[..., M, N]`."
<a name="line-17294"></a>  name: "matrix"
<a name="line-17295"></a>  type_attr: "T"
<a name="line-17296"></a>}
<a name="line-17297"></a>input_arg {
<a name="line-17298"></a>  description: "Shape is `[..., M, K]`." name: "rhs" type_attr: "T"
<a name="line-17299"></a>}
<a name="line-17300"></a>input_arg {
<a name="line-17301"></a>  description: "Scalar tensor.\n\n@compatibility(numpy)\nEquivalent to np.linalg.lstsq\n@end_compatibility"
<a name="line-17302"></a>  name: "l2_regularizer"
<a name="line-17303"></a>  type: DT_DOUBLE
<a name="line-17304"></a>}
<a name="line-17305"></a>output_arg {
<a name="line-17306"></a>  description: "Shape is `[..., N, K]`."
<a name="line-17307"></a>  name: "output"
<a name="line-17308"></a>  type_attr: "T"
<a name="line-17309"></a>}
<a name="line-17310"></a>-}</span>
<a name="line-17311"></a>
<a name="line-17312"></a><a name="pack"></a><span class='hs-comment'>-- | Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.</span>
<a name="line-17313"></a><span class='hs-comment'>--</span>
<a name="line-17314"></a><span class='hs-comment'>-- Packs the `N` tensors in `values` into a tensor with rank one higher than each</span>
<a name="line-17315"></a><span class='hs-comment'>-- tensor in `values`, by packing them along the `axis` dimension.</span>
<a name="line-17316"></a><span class='hs-comment'>-- Given a list of tensors of shape `(A, B, C)`;</span>
<a name="line-17317"></a><span class='hs-comment'>-- </span>
<a name="line-17318"></a><span class='hs-comment'>-- if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.</span>
<a name="line-17319"></a><span class='hs-comment'>-- if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.</span>
<a name="line-17320"></a><span class='hs-comment'>-- Etc.</span>
<a name="line-17321"></a><span class='hs-comment'>-- </span>
<a name="line-17322"></a><span class='hs-comment'>-- For example:</span>
<a name="line-17323"></a><span class='hs-comment'>-- </span>
<a name="line-17324"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-17325"></a><span class='hs-comment'>-- # 'x' is [1, 4]</span>
<a name="line-17326"></a><span class='hs-comment'>-- # 'y' is [2, 5]</span>
<a name="line-17327"></a><span class='hs-comment'>-- # 'z' is [3, 6]</span>
<a name="line-17328"></a><span class='hs-comment'>-- pack([x, y, z]) =&gt; [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.</span>
<a name="line-17329"></a><span class='hs-comment'>-- pack([x, y, z], axis=1) =&gt; [[1, 2, 3], [4, 5, 6]]</span>
<a name="line-17330"></a><span class='hs-comment'>-- ```</span>
<a name="line-17331"></a><span class='hs-comment'>-- </span>
<a name="line-17332"></a><span class='hs-comment'>-- This is the opposite of `unpack`.</span>
<a name="line-17333"></a><span class='hs-definition'>pack</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17334"></a>        <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __values__: Must be of same shape and type.</span>
<a name="line-17335"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The packed tensor.</span>
<a name="line-17336"></a><span class='hs-definition'>pack</span> <span class='hs-varid'>values</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"values"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>values</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17337"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Pack"</span>
<a name="line-17338"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-17339"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-17340"></a>        <span class='hs-varid'>values</span>
<a name="line-17341"></a>  <span class='hs-keyword'>where</span>
<a name="line-17342"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>values</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-17343"></a><span class='hs-comment'>{-
<a name="line-17344"></a>attr { has_minimum: true minimum: 1 name: "N" type: "int" }
<a name="line-17345"></a>attr { name: "T" type: "type" }
<a name="line-17346"></a>attr {
<a name="line-17347"></a>  default_value { i: 0 }
<a name="line-17348"></a>  description: "Dimension along which to pack.  Negative values wrap around, so the\nvalid range is `[-(R+1), R+1)`."
<a name="line-17349"></a>  name: "axis"
<a name="line-17350"></a>  type: "int"
<a name="line-17351"></a>}
<a name="line-17352"></a>input_arg {
<a name="line-17353"></a>  description: "Must be of same shape and type."
<a name="line-17354"></a>  name: "values"
<a name="line-17355"></a>  number_attr: "N"
<a name="line-17356"></a>  type_attr: "T"
<a name="line-17357"></a>}
<a name="line-17358"></a>output_arg {
<a name="line-17359"></a>  description: "The packed tensor." name: "output" type_attr: "T"
<a name="line-17360"></a>}
<a name="line-17361"></a>-}</span>
<a name="line-17362"></a>
<a name="line-17363"></a><a name="barrierClose"></a><span class='hs-comment'>-- | Closes the given barrier.</span>
<a name="line-17364"></a><span class='hs-comment'>--</span>
<a name="line-17365"></a><span class='hs-comment'>-- This operation signals that no more new elements will be inserted in the</span>
<a name="line-17366"></a><span class='hs-comment'>-- given barrier. Subsequent InsertMany that try to introduce a new key will fail.</span>
<a name="line-17367"></a><span class='hs-comment'>-- Subsequent InsertMany operations that just add missing components to already</span>
<a name="line-17368"></a><span class='hs-comment'>-- existing elements will continue to succeed. Subsequent TakeMany operations will</span>
<a name="line-17369"></a><span class='hs-comment'>-- continue to succeed if sufficient completed elements remain in the barrier.</span>
<a name="line-17370"></a><span class='hs-comment'>-- Subsequent TakeMany operations that would block will fail immediately.</span>
<a name="line-17371"></a><span class='hs-definition'>barrierClose</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a barrier.</span>
<a name="line-17372"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-17373"></a><span class='hs-definition'>barrierClose</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17374"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BarrierClose"</span><span class='hs-layout'>)</span>
<a name="line-17375"></a>        <span class='hs-varid'>handle</span>
<a name="line-17376"></a><span class='hs-comment'>{-
<a name="line-17377"></a>attr {
<a name="line-17378"></a>  default_value { b: false }
<a name="line-17379"></a>  description: "If true, all pending enqueue requests that are\nblocked on the barrier\'s queue will be cancelled. InsertMany will fail, even\nif no new key is introduced."
<a name="line-17380"></a>  name: "cancel_pending_enqueues"
<a name="line-17381"></a>  type: "bool"
<a name="line-17382"></a>}
<a name="line-17383"></a>input_arg {
<a name="line-17384"></a>  description: "The handle to a barrier."
<a name="line-17385"></a>  is_ref: true
<a name="line-17386"></a>  name: "handle"
<a name="line-17387"></a>  type: DT_STRING
<a name="line-17388"></a>}
<a name="line-17389"></a>-}</span>
<a name="line-17390"></a>
<a name="line-17391"></a><a name="selfAdjointEigV2"></a><span class='hs-comment'>-- | Computes the eigen decomposition of one or more square self-adjoint matrices.</span>
<a name="line-17392"></a><span class='hs-comment'>--</span>
<a name="line-17393"></a><span class='hs-comment'>-- Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in</span>
<a name="line-17394"></a><span class='hs-comment'>-- `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.</span>
<a name="line-17395"></a><span class='hs-comment'>-- </span>
<a name="line-17396"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-17397"></a><span class='hs-comment'>-- # a is a tensor.</span>
<a name="line-17398"></a><span class='hs-comment'>-- # e is a tensor of eigenvalues.</span>
<a name="line-17399"></a><span class='hs-comment'>-- # v is a tensor of eigenvectors.</span>
<a name="line-17400"></a><span class='hs-comment'>-- e, v = self_adjoint_eig(a)</span>
<a name="line-17401"></a><span class='hs-comment'>-- e = self_adjoint_eig(a, compute_v=False)</span>
<a name="line-17402"></a><span class='hs-comment'>-- ```</span>
<a name="line-17403"></a><span class='hs-definition'>selfAdjointEigV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17404"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: `Tensor` input of shape `[N, N]`.</span>
<a name="line-17405"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ (__e__, __v__)</span>
<a name="line-17406"></a>                    <span class='hs-comment'>--</span>
<a name="line-17407"></a>                    <span class='hs-comment'>-- * __e__: Eigenvalues. Shape is `[N]`.</span>
<a name="line-17408"></a>                    <span class='hs-comment'>--</span>
<a name="line-17409"></a>                    <span class='hs-comment'>-- * __v__: Eigenvectors. Shape is `[N, N]`.</span>
<a name="line-17410"></a><span class='hs-definition'>selfAdjointEigV2</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17411"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SelfAdjointEigV2"</span>
<a name="line-17412"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17413"></a>        <span class='hs-varid'>input</span>
<a name="line-17414"></a><span class='hs-comment'>{-
<a name="line-17415"></a>attr {
<a name="line-17416"></a>  default_value { b: true }
<a name="line-17417"></a>  description: "If `True` then eigenvectors will be computed and returned in `v`.\nOtherwise, only the eigenvalues will be computed."
<a name="line-17418"></a>  name: "compute_v"
<a name="line-17419"></a>  type: "bool"
<a name="line-17420"></a>}
<a name="line-17421"></a>attr {
<a name="line-17422"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-17423"></a>  name: "T"
<a name="line-17424"></a>  type: "type"
<a name="line-17425"></a>}
<a name="line-17426"></a>input_arg {
<a name="line-17427"></a>  description: "`Tensor` input of shape `[N, N]`."
<a name="line-17428"></a>  name: "input"
<a name="line-17429"></a>  type_attr: "T"
<a name="line-17430"></a>}
<a name="line-17431"></a>output_arg {
<a name="line-17432"></a>  description: "Eigenvalues. Shape is `[N]`."
<a name="line-17433"></a>  name: "e"
<a name="line-17434"></a>  type_attr: "T"
<a name="line-17435"></a>}
<a name="line-17436"></a>output_arg {
<a name="line-17437"></a>  description: "Eigenvectors. Shape is `[N, N]`."
<a name="line-17438"></a>  name: "v"
<a name="line-17439"></a>  type_attr: "T"
<a name="line-17440"></a>}
<a name="line-17441"></a>-}</span>
<a name="line-17442"></a>
<a name="line-17443"></a><a name="scatterSub"></a><span class='hs-comment'>-- | Subtracts sparse updates to a variable reference.</span>
<a name="line-17444"></a><span class='hs-comment'>--</span>
<a name="line-17445"></a><span class='hs-comment'>--     # Scalar indices</span>
<a name="line-17446"></a><span class='hs-comment'>--     ref[indices, ...] -= updates[...]</span>
<a name="line-17447"></a><span class='hs-comment'>-- </span>
<a name="line-17448"></a><span class='hs-comment'>--     # Vector indices (for each i)</span>
<a name="line-17449"></a><span class='hs-comment'>--     ref[indices[i], ...] -= updates[i, ...]</span>
<a name="line-17450"></a><span class='hs-comment'>-- </span>
<a name="line-17451"></a><span class='hs-comment'>--     # High rank indices (for each i, ..., j)</span>
<a name="line-17452"></a><span class='hs-comment'>--     ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]</span>
<a name="line-17453"></a><span class='hs-comment'>-- </span>
<a name="line-17454"></a><span class='hs-comment'>-- This operation outputs `ref` after the update is done.</span>
<a name="line-17455"></a><span class='hs-comment'>-- This makes it easier to chain operations that need to use the reset value.</span>
<a name="line-17456"></a><span class='hs-comment'>-- </span>
<a name="line-17457"></a><span class='hs-comment'>-- Duplicate entries are handled correctly: if multiple `indices` reference</span>
<a name="line-17458"></a><span class='hs-comment'>-- the same location, their (negated) contributions add.</span>
<a name="line-17459"></a><span class='hs-comment'>-- </span>
<a name="line-17460"></a><span class='hs-comment'>-- Requires `updates.shape = indices.shape + ref.shape[1:]`.</span>
<a name="line-17461"></a><span class='hs-comment'>-- </span>
<a name="line-17462"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-17463"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/ScatterSub.png" alt&gt;</span>
<a name="line-17464"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-17465"></a><span class='hs-definition'>scatterSub</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-17466"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-17467"></a>                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-17468"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-17469"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-17470"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-17471"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-17472"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-17473"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-17474"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17475"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: Should be from a `Variable` node.</span>
<a name="line-17476"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A tensor of indices into the first dimension of `ref`.</span>
<a name="line-17477"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __updates__: A tensor of updated values to subtract from `ref`.</span>
<a name="line-17478"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want</span>
<a name="line-17479"></a>              <span class='hs-comment'>-- to use the updated values after the update is done.</span>
<a name="line-17480"></a><span class='hs-definition'>scatterSub</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17481"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ScatterSub"</span>
<a name="line-17482"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-17483"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17484"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span>
<a name="line-17485"></a><span class='hs-comment'>{-
<a name="line-17486"></a>attr {
<a name="line-17487"></a>  allowed_values {
<a name="line-17488"></a>    list {
<a name="line-17489"></a>      type: DT_FLOAT
<a name="line-17490"></a>      type: DT_DOUBLE
<a name="line-17491"></a>      type: DT_INT64
<a name="line-17492"></a>      type: DT_INT32
<a name="line-17493"></a>      type: DT_UINT8
<a name="line-17494"></a>      type: DT_UINT16
<a name="line-17495"></a>      type: DT_INT16
<a name="line-17496"></a>      type: DT_INT8
<a name="line-17497"></a>      type: DT_COMPLEX64
<a name="line-17498"></a>      type: DT_COMPLEX128
<a name="line-17499"></a>      type: DT_QINT8
<a name="line-17500"></a>      type: DT_QUINT8
<a name="line-17501"></a>      type: DT_QINT32
<a name="line-17502"></a>      type: DT_HALF
<a name="line-17503"></a>    }
<a name="line-17504"></a>  }
<a name="line-17505"></a>  name: "T"
<a name="line-17506"></a>  type: "type"
<a name="line-17507"></a>}
<a name="line-17508"></a>attr {
<a name="line-17509"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-17510"></a>  name: "Tindices"
<a name="line-17511"></a>  type: "type"
<a name="line-17512"></a>}
<a name="line-17513"></a>attr {
<a name="line-17514"></a>  default_value { b: false }
<a name="line-17515"></a>  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-17516"></a>  name: "use_locking"
<a name="line-17517"></a>  type: "bool"
<a name="line-17518"></a>}
<a name="line-17519"></a>input_arg {
<a name="line-17520"></a>  description: "Should be from a `Variable` node."
<a name="line-17521"></a>  is_ref: true
<a name="line-17522"></a>  name: "ref"
<a name="line-17523"></a>  type_attr: "T"
<a name="line-17524"></a>}
<a name="line-17525"></a>input_arg {
<a name="line-17526"></a>  description: "A tensor of indices into the first dimension of `ref`."
<a name="line-17527"></a>  name: "indices"
<a name="line-17528"></a>  type_attr: "Tindices"
<a name="line-17529"></a>}
<a name="line-17530"></a>input_arg {
<a name="line-17531"></a>  description: "A tensor of updated values to subtract from `ref`."
<a name="line-17532"></a>  name: "updates"
<a name="line-17533"></a>  type_attr: "T"
<a name="line-17534"></a>}
<a name="line-17535"></a>output_arg {
<a name="line-17536"></a>  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
<a name="line-17537"></a>  is_ref: true
<a name="line-17538"></a>  name: "output_ref"
<a name="line-17539"></a>  type_attr: "T"
<a name="line-17540"></a>}
<a name="line-17541"></a>-}</span>
<a name="line-17542"></a>
<a name="line-17543"></a><a name="selfAdjointEig"></a><span class='hs-comment'>-- | Computes the Eigen Decomposition of a batch of square self-adjoint matrices.</span>
<a name="line-17544"></a><span class='hs-comment'>--</span>
<a name="line-17545"></a><span class='hs-comment'>-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions</span>
<a name="line-17546"></a><span class='hs-comment'>-- form square matrices, with the same constraints as the single matrix</span>
<a name="line-17547"></a><span class='hs-comment'>-- SelfAdjointEig.</span>
<a name="line-17548"></a><span class='hs-comment'>-- </span>
<a name="line-17549"></a><span class='hs-comment'>-- The result is a [..., M+1, M] matrix with [..., 0,:] containing the</span>
<a name="line-17550"></a><span class='hs-comment'>-- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.</span>
<a name="line-17551"></a><span class='hs-definition'>selfAdjointEig</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17552"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Shape is `[..., M, M]`.</span>
<a name="line-17553"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Shape is `[..., M+1, M]`.</span>
<a name="line-17554"></a><span class='hs-definition'>selfAdjointEig</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17555"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SelfAdjointEig"</span>
<a name="line-17556"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17557"></a>        <span class='hs-varid'>input</span>
<a name="line-17558"></a><span class='hs-comment'>{-
<a name="line-17559"></a>attr {
<a name="line-17560"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-17561"></a>  name: "T"
<a name="line-17562"></a>  type: "type"
<a name="line-17563"></a>}
<a name="line-17564"></a>input_arg {
<a name="line-17565"></a>  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
<a name="line-17566"></a>}
<a name="line-17567"></a>output_arg {
<a name="line-17568"></a>  description: "Shape is `[..., M+1, M]`."
<a name="line-17569"></a>  name: "output"
<a name="line-17570"></a>  type_attr: "T"
<a name="line-17571"></a>}
<a name="line-17572"></a>-}</span>
<a name="line-17573"></a>
<a name="line-17574"></a><a name="stopGradient"></a><span class='hs-comment'>-- | Stops gradient computation.</span>
<a name="line-17575"></a><span class='hs-comment'>--</span>
<a name="line-17576"></a><span class='hs-comment'>-- When executed in a graph, this op outputs its input tensor as-is.</span>
<a name="line-17577"></a><span class='hs-comment'>-- </span>
<a name="line-17578"></a><span class='hs-comment'>-- When building ops to compute gradients, this op prevents the contribution of</span>
<a name="line-17579"></a><span class='hs-comment'>-- its inputs to be taken into account.  Normally, the gradient generator adds ops</span>
<a name="line-17580"></a><span class='hs-comment'>-- to a graph to compute the derivatives of a specified 'loss' by recursively</span>
<a name="line-17581"></a><span class='hs-comment'>-- finding out inputs that contributed to its computation.  If you insert this op</span>
<a name="line-17582"></a><span class='hs-comment'>-- in the graph it inputs are masked from the gradient generator.  They are not</span>
<a name="line-17583"></a><span class='hs-comment'>-- taken into account for computing gradients.</span>
<a name="line-17584"></a><span class='hs-comment'>-- </span>
<a name="line-17585"></a><span class='hs-comment'>-- This is useful any time you want to compute a value with TensorFlow but need</span>
<a name="line-17586"></a><span class='hs-comment'>-- to pretend that the value was a constant. Some examples include:</span>
<a name="line-17587"></a><span class='hs-comment'>-- </span>
<a name="line-17588"></a><span class='hs-comment'>-- *  The *EM* algorithm where the *M-step* should not involve backpropagation</span>
<a name="line-17589"></a><span class='hs-comment'>--    through the output of the *E-step*.</span>
<a name="line-17590"></a><span class='hs-comment'>-- *  Contrastive divergence training of Boltzmann machines where, when</span>
<a name="line-17591"></a><span class='hs-comment'>--    differentiating the energy function, the training must not backpropagate</span>
<a name="line-17592"></a><span class='hs-comment'>--    through the graph that generated the samples from the model.</span>
<a name="line-17593"></a><span class='hs-comment'>-- *  Adversarial training, where no backprop should happen through the adversarial</span>
<a name="line-17594"></a><span class='hs-comment'>--    example generation process.</span>
<a name="line-17595"></a><span class='hs-definition'>stopGradient</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-17596"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-17597"></a><span class='hs-definition'>stopGradient</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17598"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StopGradient"</span>
<a name="line-17599"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17600"></a>        <span class='hs-varid'>input</span>
<a name="line-17601"></a><span class='hs-comment'>{-
<a name="line-17602"></a>attr { name: "T" type: "type" }
<a name="line-17603"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-17604"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-17605"></a>-}</span>
<a name="line-17606"></a>
<a name="line-17607"></a><span class='hs-comment'>-- | Returns the index with the largest value across dimensions of a tensor.</span>
<a name="line-17608"></a>
<a name="line-17609"></a><a name="argMax"></a><span class='hs-definition'>argMax</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-17610"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-17611"></a>                                         <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-17612"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-17613"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-17614"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-17615"></a>                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-17616"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-17617"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17618"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-17619"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __dimension__: int32, 0 &lt;= dimension &lt; rank(input).  Describes which dimension</span>
<a name="line-17620"></a>                            <span class='hs-comment'>-- of the input Tensor to reduce across. For vectors, use dimension = 0.</span>
<a name="line-17621"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-17622"></a><span class='hs-definition'>argMax</span> <span class='hs-varid'>input</span> <span class='hs-varid'>dimension</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17623"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ArgMax"</span>
<a name="line-17624"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-17625"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17626"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>dimension</span>
<a name="line-17627"></a><span class='hs-comment'>{-
<a name="line-17628"></a>attr {
<a name="line-17629"></a>  allowed_values {
<a name="line-17630"></a>    list {
<a name="line-17631"></a>      type: DT_FLOAT
<a name="line-17632"></a>      type: DT_DOUBLE
<a name="line-17633"></a>      type: DT_INT64
<a name="line-17634"></a>      type: DT_INT32
<a name="line-17635"></a>      type: DT_UINT8
<a name="line-17636"></a>      type: DT_UINT16
<a name="line-17637"></a>      type: DT_INT16
<a name="line-17638"></a>      type: DT_INT8
<a name="line-17639"></a>      type: DT_COMPLEX64
<a name="line-17640"></a>      type: DT_COMPLEX128
<a name="line-17641"></a>      type: DT_QINT8
<a name="line-17642"></a>      type: DT_QUINT8
<a name="line-17643"></a>      type: DT_QINT32
<a name="line-17644"></a>      type: DT_HALF
<a name="line-17645"></a>    }
<a name="line-17646"></a>  }
<a name="line-17647"></a>  name: "T"
<a name="line-17648"></a>  type: "type"
<a name="line-17649"></a>}
<a name="line-17650"></a>attr {
<a name="line-17651"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-17652"></a>  default_value { type: DT_INT32 }
<a name="line-17653"></a>  name: "Tidx"
<a name="line-17654"></a>  type: "type"
<a name="line-17655"></a>}
<a name="line-17656"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-17657"></a>input_arg {
<a name="line-17658"></a>  description: "int32, 0 &lt;= dimension &lt; rank(input).  Describes which dimension\nof the input Tensor to reduce across. For vectors, use dimension = 0."
<a name="line-17659"></a>  name: "dimension"
<a name="line-17660"></a>  type_attr: "Tidx"
<a name="line-17661"></a>}
<a name="line-17662"></a>output_arg { name: "output" type: DT_INT64 }
<a name="line-17663"></a>-}</span>
<a name="line-17664"></a>
<a name="line-17665"></a><a name="choleskyGrad"></a><span class='hs-comment'>-- | Computes the reverse mode backpropagated gradient of the Cholesky algorithm.</span>
<a name="line-17666"></a><span class='hs-comment'>--</span>
<a name="line-17667"></a><span class='hs-comment'>-- For an explanation see "Differentiation of the Cholesky algorithm" by</span>
<a name="line-17668"></a><span class='hs-comment'>-- Iain Murray <a href="http://arxiv.org/abs/1602.07527.">http://arxiv.org/abs/1602.07527.</a></span>
<a name="line-17669"></a><span class='hs-definition'>choleskyGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17670"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l__: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.</span>
<a name="line-17671"></a>                            <span class='hs-comment'>-- Algorithm depends only on lower triangular part of the innermost matrices of</span>
<a name="line-17672"></a>                            <span class='hs-comment'>-- this tensor.</span>
<a name="line-17673"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: df/dl where f is some scalar function. Shape is `[..., M, M]`.</span>
<a name="line-17674"></a>                               <span class='hs-comment'>-- Algorithm depends only on lower triangular part of the innermost matrices of</span>
<a name="line-17675"></a>                               <span class='hs-comment'>-- this tensor.</span>
<a name="line-17676"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Symmetrized version of df/dA . Shape is `[..., M, M]`</span>
<a name="line-17677"></a><span class='hs-definition'>choleskyGrad</span> <span class='hs-varid'>l</span> <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17678"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CholeskyGrad"</span>
<a name="line-17679"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17680"></a>        <span class='hs-varid'>l</span> <span class='hs-varid'>grad</span>
<a name="line-17681"></a><span class='hs-comment'>{-
<a name="line-17682"></a>attr {
<a name="line-17683"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-17684"></a>  name: "T"
<a name="line-17685"></a>  type: "type"
<a name="line-17686"></a>}
<a name="line-17687"></a>input_arg {
<a name="line-17688"></a>  description: "Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor."
<a name="line-17689"></a>  name: "l"
<a name="line-17690"></a>  type_attr: "T"
<a name="line-17691"></a>}
<a name="line-17692"></a>input_arg {
<a name="line-17693"></a>  description: "df/dl where f is some scalar function. Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor."
<a name="line-17694"></a>  name: "grad"
<a name="line-17695"></a>  type_attr: "T"
<a name="line-17696"></a>}
<a name="line-17697"></a>output_arg {
<a name="line-17698"></a>  description: "Symmetrized version of df/dA . Shape is `[..., M, M]`"
<a name="line-17699"></a>  name: "output"
<a name="line-17700"></a>  type_attr: "T"
<a name="line-17701"></a>}
<a name="line-17702"></a>-}</span>
<a name="line-17703"></a>
<a name="line-17704"></a><a name="sparseReshape"></a><span class='hs-comment'>-- | Reshapes a SparseTensor to represent values in a new dense shape.</span>
<a name="line-17705"></a><span class='hs-comment'>--</span>
<a name="line-17706"></a><span class='hs-comment'>-- This operation has the same semantics as reshape on the represented dense</span>
<a name="line-17707"></a><span class='hs-comment'>-- tensor.  The `input_indices` are recomputed based on the requested `new_shape`.</span>
<a name="line-17708"></a><span class='hs-comment'>-- </span>
<a name="line-17709"></a><span class='hs-comment'>-- If one component of `new_shape` is the special value -1, the size of that</span>
<a name="line-17710"></a><span class='hs-comment'>-- dimension is computed so that the total dense size remains constant.  At</span>
<a name="line-17711"></a><span class='hs-comment'>-- most one component of `new_shape` can be -1.  The number of dense elements</span>
<a name="line-17712"></a><span class='hs-comment'>-- implied by `new_shape` must be the same as the number of dense elements</span>
<a name="line-17713"></a><span class='hs-comment'>-- originally implied by `input_shape`.</span>
<a name="line-17714"></a><span class='hs-comment'>-- </span>
<a name="line-17715"></a><span class='hs-comment'>-- Reshaping does not affect the order of values in the SparseTensor.</span>
<a name="line-17716"></a><span class='hs-comment'>-- </span>
<a name="line-17717"></a><span class='hs-comment'>-- If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`</span>
<a name="line-17718"></a><span class='hs-comment'>-- has length `R_out`, then `input_indices` has shape `[N, R_in]`,</span>
<a name="line-17719"></a><span class='hs-comment'>-- `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and</span>
<a name="line-17720"></a><span class='hs-comment'>-- `output_shape` has length `R_out`.</span>
<a name="line-17721"></a><span class='hs-definition'>sparseReshape</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __input_indices__: 2-D.  `N x R_in` matrix with the indices of non-empty values in a</span>
<a name="line-17722"></a>                                          <span class='hs-comment'>-- SparseTensor.</span>
<a name="line-17723"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __input_shape__: 1-D.  `R_in` vector with the input SparseTensor's dense shape.</span>
<a name="line-17724"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __new_shape__: 1-D.  `R_out` vector with the requested new dense shape.</span>
<a name="line-17725"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span>
<a name="line-17726"></a>                 <span class='hs-comment'>-- ^ (__output_indices__, __output_shape__)</span>
<a name="line-17727"></a>                 <span class='hs-comment'>--</span>
<a name="line-17728"></a>                 <span class='hs-comment'>-- * __output_indices__: 2-D.  `N x R_out` matrix with the updated indices of non-empty</span>
<a name="line-17729"></a>                 <span class='hs-comment'>-- values in the output SparseTensor.</span>
<a name="line-17730"></a>                 <span class='hs-comment'>--</span>
<a name="line-17731"></a>                 <span class='hs-comment'>-- * __output_shape__: 1-D.  `R_out` vector with the full dense shape of the output</span>
<a name="line-17732"></a>                 <span class='hs-comment'>-- SparseTensor.  This is the same as `new_shape` but with any -1 dimensions</span>
<a name="line-17733"></a>                 <span class='hs-comment'>-- filled in.</span>
<a name="line-17734"></a><span class='hs-definition'>sparseReshape</span> <span class='hs-varid'>input_indices</span> <span class='hs-varid'>input_shape</span> <span class='hs-varid'>new_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17735"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseReshape"</span><span class='hs-layout'>)</span>
<a name="line-17736"></a>        <span class='hs-varid'>input_indices</span> <span class='hs-varid'>input_shape</span> <span class='hs-varid'>new_shape</span>
<a name="line-17737"></a><span class='hs-comment'>{-
<a name="line-17738"></a>input_arg {
<a name="line-17739"></a>  description: "2-D.  `N x R_in` matrix with the indices of non-empty values in a\nSparseTensor."
<a name="line-17740"></a>  name: "input_indices"
<a name="line-17741"></a>  type: DT_INT64
<a name="line-17742"></a>}
<a name="line-17743"></a>input_arg {
<a name="line-17744"></a>  description: "1-D.  `R_in` vector with the input SparseTensor\'s dense shape."
<a name="line-17745"></a>  name: "input_shape"
<a name="line-17746"></a>  type: DT_INT64
<a name="line-17747"></a>}
<a name="line-17748"></a>input_arg {
<a name="line-17749"></a>  description: "1-D.  `R_out` vector with the requested new dense shape."
<a name="line-17750"></a>  name: "new_shape"
<a name="line-17751"></a>  type: DT_INT64
<a name="line-17752"></a>}
<a name="line-17753"></a>output_arg {
<a name="line-17754"></a>  description: "2-D.  `N x R_out` matrix with the updated indices of non-empty\nvalues in the output SparseTensor."
<a name="line-17755"></a>  name: "output_indices"
<a name="line-17756"></a>  type: DT_INT64
<a name="line-17757"></a>}
<a name="line-17758"></a>output_arg {
<a name="line-17759"></a>  description: "1-D.  `R_out` vector with the full dense shape of the output\nSparseTensor.  This is the same as `new_shape` but with any -1 dimensions\nfilled in."
<a name="line-17760"></a>  name: "output_shape"
<a name="line-17761"></a>  type: DT_INT64
<a name="line-17762"></a>}
<a name="line-17763"></a>-}</span>
<a name="line-17764"></a>
<a name="line-17765"></a><span class='hs-comment'>-- | var: Should be from a Variable().</span>
<a name="line-17766"></a>
<a name="line-17767"></a><a name="sparseApplyAdadelta"></a><span class='hs-definition'>sparseApplyAdadelta</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-17768"></a>                                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-17769"></a>                                                                   <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-17770"></a>                                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-17771"></a>                                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-17772"></a>                                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-17773"></a>                                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-17774"></a>                                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-17775"></a>                                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-17776"></a>                                                                   <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-17777"></a>                                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-17778"></a>                                                           <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-17779"></a>                                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-17780"></a>                                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17781"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__</span>
<a name="line-17782"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum__: Should be from a Variable().</span>
<a name="line-17783"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum_update__: : Should be from a Variable().</span>
<a name="line-17784"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Learning rate. Must be a scalar.</span>
<a name="line-17785"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rho__: Decay factor. Must be a scalar.</span>
<a name="line-17786"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __epsilon__: Constant factor. Must be a scalar.</span>
<a name="line-17787"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-17788"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A vector of indices into the first dimension of var and accum.</span>
<a name="line-17789"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-17790"></a><span class='hs-definition'>sparseApplyAdadelta</span> <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>accum_update</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span>
<a name="line-17791"></a>                    <span class='hs-varid'>indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17792"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseApplyAdadelta"</span>
<a name="line-17793"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-17794"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17795"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>accum_update</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span>
<a name="line-17796"></a><span class='hs-comment'>{-
<a name="line-17797"></a>attr {
<a name="line-17798"></a>  allowed_values {
<a name="line-17799"></a>    list {
<a name="line-17800"></a>      type: DT_FLOAT
<a name="line-17801"></a>      type: DT_DOUBLE
<a name="line-17802"></a>      type: DT_INT64
<a name="line-17803"></a>      type: DT_INT32
<a name="line-17804"></a>      type: DT_UINT8
<a name="line-17805"></a>      type: DT_UINT16
<a name="line-17806"></a>      type: DT_INT16
<a name="line-17807"></a>      type: DT_INT8
<a name="line-17808"></a>      type: DT_COMPLEX64
<a name="line-17809"></a>      type: DT_COMPLEX128
<a name="line-17810"></a>      type: DT_QINT8
<a name="line-17811"></a>      type: DT_QUINT8
<a name="line-17812"></a>      type: DT_QINT32
<a name="line-17813"></a>      type: DT_HALF
<a name="line-17814"></a>    }
<a name="line-17815"></a>  }
<a name="line-17816"></a>  name: "T"
<a name="line-17817"></a>  type: "type"
<a name="line-17818"></a>}
<a name="line-17819"></a>attr {
<a name="line-17820"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-17821"></a>  name: "Tindices"
<a name="line-17822"></a>  type: "type"
<a name="line-17823"></a>}
<a name="line-17824"></a>attr {
<a name="line-17825"></a>  default_value { b: false }
<a name="line-17826"></a>  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
<a name="line-17827"></a>  name: "use_locking"
<a name="line-17828"></a>  type: "bool"
<a name="line-17829"></a>}
<a name="line-17830"></a>input_arg { is_ref: true name: "var" type_attr: "T" }
<a name="line-17831"></a>input_arg {
<a name="line-17832"></a>  description: "Should be from a Variable()."
<a name="line-17833"></a>  is_ref: true
<a name="line-17834"></a>  name: "accum"
<a name="line-17835"></a>  type_attr: "T"
<a name="line-17836"></a>}
<a name="line-17837"></a>input_arg {
<a name="line-17838"></a>  description: ": Should be from a Variable()."
<a name="line-17839"></a>  is_ref: true
<a name="line-17840"></a>  name: "accum_update"
<a name="line-17841"></a>  type_attr: "T"
<a name="line-17842"></a>}
<a name="line-17843"></a>input_arg {
<a name="line-17844"></a>  description: "Learning rate. Must be a scalar."
<a name="line-17845"></a>  name: "lr"
<a name="line-17846"></a>  type_attr: "T"
<a name="line-17847"></a>}
<a name="line-17848"></a>input_arg {
<a name="line-17849"></a>  description: "Decay factor. Must be a scalar."
<a name="line-17850"></a>  name: "rho"
<a name="line-17851"></a>  type_attr: "T"
<a name="line-17852"></a>}
<a name="line-17853"></a>input_arg {
<a name="line-17854"></a>  description: "Constant factor. Must be a scalar."
<a name="line-17855"></a>  name: "epsilon"
<a name="line-17856"></a>  type_attr: "T"
<a name="line-17857"></a>}
<a name="line-17858"></a>input_arg {
<a name="line-17859"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-17860"></a>}
<a name="line-17861"></a>input_arg {
<a name="line-17862"></a>  description: "A vector of indices into the first dimension of var and accum."
<a name="line-17863"></a>  name: "indices"
<a name="line-17864"></a>  type_attr: "Tindices"
<a name="line-17865"></a>}
<a name="line-17866"></a>output_arg {
<a name="line-17867"></a>  description: "Same as \"var\"."
<a name="line-17868"></a>  is_ref: true
<a name="line-17869"></a>  name: "out"
<a name="line-17870"></a>  type_attr: "T"
<a name="line-17871"></a>}
<a name="line-17872"></a>-}</span>
<a name="line-17873"></a>
<a name="line-17874"></a><span class='hs-comment'>-- | Computes the gradient of morphological 2-D dilation with respect to the filter.</span>
<a name="line-17875"></a>
<a name="line-17876"></a><a name="dilation2DBackpropFilter"></a><span class='hs-definition'>dilation2DBackpropFilter</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-17877"></a>                                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-17878"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-17879"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-17880"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-17881"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-17882"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-17883"></a>                                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17884"></a>                            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.</span>
<a name="line-17885"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.</span>
<a name="line-17886"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`.</span>
<a name="line-17887"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter_backprop__: 3-D with shape `[filter_height, filter_width, depth]`.</span>
<a name="line-17888"></a><span class='hs-definition'>dilation2DBackpropFilter</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17889"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Dilation2DBackpropFilter"</span>
<a name="line-17890"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17891"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span>
<a name="line-17892"></a><span class='hs-comment'>{-
<a name="line-17893"></a>attr {
<a name="line-17894"></a>  allowed_values {
<a name="line-17895"></a>    list {
<a name="line-17896"></a>      type: DT_FLOAT
<a name="line-17897"></a>      type: DT_DOUBLE
<a name="line-17898"></a>      type: DT_INT32
<a name="line-17899"></a>      type: DT_INT64
<a name="line-17900"></a>      type: DT_UINT8
<a name="line-17901"></a>      type: DT_INT16
<a name="line-17902"></a>      type: DT_INT8
<a name="line-17903"></a>      type: DT_UINT16
<a name="line-17904"></a>      type: DT_HALF
<a name="line-17905"></a>    }
<a name="line-17906"></a>  }
<a name="line-17907"></a>  name: "T"
<a name="line-17908"></a>  type: "type"
<a name="line-17909"></a>}
<a name="line-17910"></a>attr {
<a name="line-17911"></a>  description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`."
<a name="line-17912"></a>  has_minimum: true
<a name="line-17913"></a>  minimum: 4
<a name="line-17914"></a>  name: "strides"
<a name="line-17915"></a>  type: "list(int)"
<a name="line-17916"></a>}
<a name="line-17917"></a>attr {
<a name="line-17918"></a>  description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`."
<a name="line-17919"></a>  has_minimum: true
<a name="line-17920"></a>  minimum: 4
<a name="line-17921"></a>  name: "rates"
<a name="line-17922"></a>  type: "list(int)"
<a name="line-17923"></a>}
<a name="line-17924"></a>attr {
<a name="line-17925"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-17926"></a>  description: "The type of padding algorithm to use."
<a name="line-17927"></a>  name: "padding"
<a name="line-17928"></a>  type: "string"
<a name="line-17929"></a>}
<a name="line-17930"></a>input_arg {
<a name="line-17931"></a>  description: "4-D with shape `[batch, in_height, in_width, depth]`."
<a name="line-17932"></a>  name: "input"
<a name="line-17933"></a>  type_attr: "T"
<a name="line-17934"></a>}
<a name="line-17935"></a>input_arg {
<a name="line-17936"></a>  description: "3-D with shape `[filter_height, filter_width, depth]`."
<a name="line-17937"></a>  name: "filter"
<a name="line-17938"></a>  type_attr: "T"
<a name="line-17939"></a>}
<a name="line-17940"></a>input_arg {
<a name="line-17941"></a>  description: "4-D with shape `[batch, out_height, out_width, depth]`."
<a name="line-17942"></a>  name: "out_backprop"
<a name="line-17943"></a>  type_attr: "T"
<a name="line-17944"></a>}
<a name="line-17945"></a>output_arg {
<a name="line-17946"></a>  description: "3-D with shape `[filter_height, filter_width, depth]`."
<a name="line-17947"></a>  name: "filter_backprop"
<a name="line-17948"></a>  type_attr: "T"
<a name="line-17949"></a>}
<a name="line-17950"></a>-}</span>
<a name="line-17951"></a>
<a name="line-17952"></a><span class='hs-comment'>-- | </span>
<a name="line-17953"></a>
<a name="line-17954"></a><a name="batchSelfAdjointEigV2"></a><span class='hs-definition'>batchSelfAdjointEigV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-17955"></a>                                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-17956"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-17957"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ (__e__, __v__)</span>
<a name="line-17958"></a>                         <span class='hs-comment'>--</span>
<a name="line-17959"></a>                         <span class='hs-comment'>-- * __e__</span>
<a name="line-17960"></a>                         <span class='hs-comment'>--</span>
<a name="line-17961"></a>                         <span class='hs-comment'>-- * __v__</span>
<a name="line-17962"></a><span class='hs-definition'>batchSelfAdjointEigV2</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17963"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchSelfAdjointEigV2"</span>
<a name="line-17964"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-17965"></a>        <span class='hs-varid'>input</span>
<a name="line-17966"></a><span class='hs-comment'>{-
<a name="line-17967"></a>attr { default_value { b: true } name: "compute_v" type: "bool" }
<a name="line-17968"></a>attr {
<a name="line-17969"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-17970"></a>  name: "T"
<a name="line-17971"></a>  type: "type"
<a name="line-17972"></a>}
<a name="line-17973"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-17974"></a>output_arg { name: "e" type_attr: "T" }
<a name="line-17975"></a>output_arg { name: "v" type_attr: "T" }
<a name="line-17976"></a>-}</span>
<a name="line-17977"></a>
<a name="line-17978"></a><span class='hs-comment'>-- | Computes the number of incomplete elements in the given barrier.</span>
<a name="line-17979"></a>
<a name="line-17980"></a><a name="barrierIncompleteSize"></a><span class='hs-definition'>barrierIncompleteSize</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a barrier.</span>
<a name="line-17981"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __size__: The number of incomplete elements (i.e. those with some of their value</span>
<a name="line-17982"></a>                         <span class='hs-comment'>-- components not set) in the barrier.</span>
<a name="line-17983"></a><span class='hs-definition'>barrierIncompleteSize</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-17984"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BarrierIncompleteSize"</span><span class='hs-layout'>)</span>
<a name="line-17985"></a>        <span class='hs-varid'>handle</span>
<a name="line-17986"></a><span class='hs-comment'>{-
<a name="line-17987"></a>input_arg {
<a name="line-17988"></a>  description: "The handle to a barrier."
<a name="line-17989"></a>  is_ref: true
<a name="line-17990"></a>  name: "handle"
<a name="line-17991"></a>  type: DT_STRING
<a name="line-17992"></a>}
<a name="line-17993"></a>output_arg {
<a name="line-17994"></a>  description: "The number of incomplete elements (i.e. those with some of their value\ncomponents not set) in the barrier."
<a name="line-17995"></a>  name: "size"
<a name="line-17996"></a>  type: DT_INT32
<a name="line-17997"></a>}
<a name="line-17998"></a>-}</span>
<a name="line-17999"></a>
<a name="line-18000"></a><a name="fakeQuantWithMinMaxVars"></a><span class='hs-comment'>-- | Fake-quantize the 'inputs' tensor of type float and shape `[b, h, w, d]` via</span>
<a name="line-18001"></a><span class='hs-comment'>--</span>
<a name="line-18002"></a><span class='hs-comment'>-- global float scalars `min` and `max` to 'outputs' tensor of same shape as</span>
<a name="line-18003"></a><span class='hs-comment'>-- `inputs`.</span>
<a name="line-18004"></a><span class='hs-comment'>-- </span>
<a name="line-18005"></a><span class='hs-comment'>-- [min; max] is the clamping range for the 'inputs' data.  Op divides this range</span>
<a name="line-18006"></a><span class='hs-comment'>-- into 255 steps (total of 256 values), then replaces each 'inputs' value with the</span>
<a name="line-18007"></a><span class='hs-comment'>-- closest of the quantized step values.</span>
<a name="line-18008"></a><span class='hs-comment'>-- </span>
<a name="line-18009"></a><span class='hs-comment'>-- This operation has a gradient and thus allows for training `min` and `max` values.</span>
<a name="line-18010"></a><span class='hs-definition'>fakeQuantWithMinMaxVars</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __inputs__</span>
<a name="line-18011"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min__</span>
<a name="line-18012"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max__</span>
<a name="line-18013"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __outputs__</span>
<a name="line-18014"></a><span class='hs-definition'>fakeQuantWithMinMaxVars</span> <span class='hs-varid'>inputs</span> <span class='hs-varid'>min</span> <span class='hs-varid'>max</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18015"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FakeQuantWithMinMaxVars"</span><span class='hs-layout'>)</span>
<a name="line-18016"></a>        <span class='hs-varid'>inputs</span> <span class='hs-varid'>min</span> <span class='hs-varid'>max</span>
<a name="line-18017"></a><span class='hs-comment'>{-
<a name="line-18018"></a>input_arg { name: "inputs" type: DT_FLOAT }
<a name="line-18019"></a>input_arg { name: "min" type: DT_FLOAT }
<a name="line-18020"></a>input_arg { name: "max" type: DT_FLOAT }
<a name="line-18021"></a>output_arg { name: "outputs" type: DT_FLOAT }
<a name="line-18022"></a>-}</span>
<a name="line-18023"></a>
<a name="line-18024"></a><a name="readVariableOp"></a><span class='hs-comment'>-- | Reads the value of a variable.</span>
<a name="line-18025"></a><span class='hs-comment'>--</span>
<a name="line-18026"></a><span class='hs-comment'>-- The tensor returned by this operation is immutable.</span>
<a name="line-18027"></a><span class='hs-comment'>-- </span>
<a name="line-18028"></a><span class='hs-comment'>-- The value returned by this operation is guaranteed to be influenced by all the</span>
<a name="line-18029"></a><span class='hs-comment'>-- writes on which this operation depends directly or indirectly, and to not be</span>
<a name="line-18030"></a><span class='hs-comment'>-- influenced by any of the writes which depend directly or indirectly on this</span>
<a name="line-18031"></a><span class='hs-comment'>-- operation.</span>
<a name="line-18032"></a><span class='hs-definition'>readVariableOp</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18033"></a>                  <span class='hs-conid'>ResourceHandle</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __resource__: handle to the resource in which to store the variable.</span>
<a name="line-18034"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __value__</span>
<a name="line-18035"></a><span class='hs-definition'>readVariableOp</span> <span class='hs-varid'>resource</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18036"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReadVariableOp"</span>
<a name="line-18037"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-18038"></a>        <span class='hs-varid'>resource</span>
<a name="line-18039"></a><span class='hs-comment'>{-
<a name="line-18040"></a>attr {
<a name="line-18041"></a>  description: "the dtype of the value." name: "dtype" type: "type"
<a name="line-18042"></a>}
<a name="line-18043"></a>input_arg {
<a name="line-18044"></a>  description: "handle to the resource in which to store the variable."
<a name="line-18045"></a>  name: "resource"
<a name="line-18046"></a>  type: DT_RESOURCE
<a name="line-18047"></a>}
<a name="line-18048"></a>output_arg { name: "value" type_attr: "dtype" }
<a name="line-18049"></a>-}</span>
<a name="line-18050"></a>
<a name="line-18051"></a><a name="fusedBatchNormGrad"></a><span class='hs-comment'>-- | Gradient for batch normalization.</span>
<a name="line-18052"></a><span class='hs-comment'>--</span>
<a name="line-18053"></a><span class='hs-comment'>-- Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".</span>
<a name="line-18054"></a><span class='hs-comment'>-- The size of 1D Tensors matches the dimension C of the 4D Tensors.</span>
<a name="line-18055"></a><span class='hs-definition'>fusedBatchNormGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-18056"></a>                                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-18057"></a>                                                         <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-18058"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-18059"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-18060"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-18061"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-18062"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-18063"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-18064"></a>                                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18065"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y_backprop__: A 4D Tensor for the gradient with respect to y.</span>
<a name="line-18066"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__: A 4D Tensor for input data.</span>
<a name="line-18067"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x.</span>
<a name="line-18068"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused</span>
<a name="line-18069"></a>                                     <span class='hs-comment'>-- in the gradient computation.</span>
<a name="line-18070"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance</span>
<a name="line-18071"></a>                                     <span class='hs-comment'>-- in the cuDNN case), to be used in the gradient computation.</span>
<a name="line-18072"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-18073"></a>                          <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-18074"></a>                      <span class='hs-comment'>-- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_3__, __reserve_space_4__)</span>
<a name="line-18075"></a>                      <span class='hs-comment'>--</span>
<a name="line-18076"></a>                      <span class='hs-comment'>-- * __x_backprop__: A 4D Tensor for the gradient with respect to x.</span>
<a name="line-18077"></a>                      <span class='hs-comment'>--</span>
<a name="line-18078"></a>                      <span class='hs-comment'>-- * __scale_backprop__: A 1D Tensor for the gradient with respect to scale.</span>
<a name="line-18079"></a>                      <span class='hs-comment'>--</span>
<a name="line-18080"></a>                      <span class='hs-comment'>-- * __offset_backprop__: A 1D Tensor for the gradient with respect to offset.</span>
<a name="line-18081"></a>                      <span class='hs-comment'>--</span>
<a name="line-18082"></a>                      <span class='hs-comment'>-- * __reserve_space_3__: Unused placeholder to match the mean input in FusedBatchNorm.</span>
<a name="line-18083"></a>                      <span class='hs-comment'>--</span>
<a name="line-18084"></a>                      <span class='hs-comment'>-- * __reserve_space_4__: Unused placeholder to match the variance input</span>
<a name="line-18085"></a>                      <span class='hs-comment'>-- in FusedBatchNorm.</span>
<a name="line-18086"></a><span class='hs-definition'>fusedBatchNormGrad</span> <span class='hs-varid'>y_backprop</span> <span class='hs-varid'>x</span> <span class='hs-varid'>scale</span> <span class='hs-varid'>reserve_space_1</span>
<a name="line-18087"></a>                   <span class='hs-varid'>reserve_space_2</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18088"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FusedBatchNormGrad"</span>
<a name="line-18089"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-18090"></a>        <span class='hs-varid'>y_backprop</span> <span class='hs-varid'>x</span> <span class='hs-varid'>scale</span> <span class='hs-varid'>reserve_space_1</span> <span class='hs-varid'>reserve_space_2</span>
<a name="line-18091"></a><span class='hs-comment'>{-
<a name="line-18092"></a>attr {
<a name="line-18093"></a>  allowed_values {
<a name="line-18094"></a>    list {
<a name="line-18095"></a>      type: DT_FLOAT
<a name="line-18096"></a>      type: DT_DOUBLE
<a name="line-18097"></a>      type: DT_INT64
<a name="line-18098"></a>      type: DT_INT32
<a name="line-18099"></a>      type: DT_UINT8
<a name="line-18100"></a>      type: DT_UINT16
<a name="line-18101"></a>      type: DT_INT16
<a name="line-18102"></a>      type: DT_INT8
<a name="line-18103"></a>      type: DT_COMPLEX64
<a name="line-18104"></a>      type: DT_COMPLEX128
<a name="line-18105"></a>      type: DT_QINT8
<a name="line-18106"></a>      type: DT_QUINT8
<a name="line-18107"></a>      type: DT_QINT32
<a name="line-18108"></a>      type: DT_HALF
<a name="line-18109"></a>    }
<a name="line-18110"></a>  }
<a name="line-18111"></a>  description: "The data type for the elements of input and output Tensors."
<a name="line-18112"></a>  name: "T"
<a name="line-18113"></a>  type: "type"
<a name="line-18114"></a>}
<a name="line-18115"></a>attr {
<a name="line-18116"></a>  default_value { f: 1.0e-4 }
<a name="line-18117"></a>  description: "A small float number added to the variance of x."
<a name="line-18118"></a>  name: "epsilon"
<a name="line-18119"></a>  type: "float"
<a name="line-18120"></a>}
<a name="line-18121"></a>attr {
<a name="line-18122"></a>  default_value { s: "NHWC" }
<a name="line-18123"></a>  description: "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\"."
<a name="line-18124"></a>  name: "data_format"
<a name="line-18125"></a>  type: "string"
<a name="line-18126"></a>}
<a name="line-18127"></a>attr {
<a name="line-18128"></a>  default_value { b: true }
<a name="line-18129"></a>  description: "A bool value to indicate the operation is for training (default)\nor inference."
<a name="line-18130"></a>  name: "is_training"
<a name="line-18131"></a>  type: "bool"
<a name="line-18132"></a>}
<a name="line-18133"></a>input_arg {
<a name="line-18134"></a>  description: "A 4D Tensor for the gradient with respect to y."
<a name="line-18135"></a>  name: "y_backprop"
<a name="line-18136"></a>  type_attr: "T"
<a name="line-18137"></a>}
<a name="line-18138"></a>input_arg {
<a name="line-18139"></a>  description: "A 4D Tensor for input data." name: "x" type_attr: "T"
<a name="line-18140"></a>}
<a name="line-18141"></a>input_arg {
<a name="line-18142"></a>  description: "A 1D Tensor for scaling factor, to scale the normalized x."
<a name="line-18143"></a>  name: "scale"
<a name="line-18144"></a>  type_attr: "T"
<a name="line-18145"></a>}
<a name="line-18146"></a>input_arg {
<a name="line-18147"></a>  description: "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation."
<a name="line-18148"></a>  name: "reserve_space_1"
<a name="line-18149"></a>  type_attr: "T"
<a name="line-18150"></a>}
<a name="line-18151"></a>input_arg {
<a name="line-18152"></a>  description: "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be used in the gradient computation."
<a name="line-18153"></a>  name: "reserve_space_2"
<a name="line-18154"></a>  type_attr: "T"
<a name="line-18155"></a>}
<a name="line-18156"></a>output_arg {
<a name="line-18157"></a>  description: "A 4D Tensor for the gradient with respect to x."
<a name="line-18158"></a>  name: "x_backprop"
<a name="line-18159"></a>  type_attr: "T"
<a name="line-18160"></a>}
<a name="line-18161"></a>output_arg {
<a name="line-18162"></a>  description: "A 1D Tensor for the gradient with respect to scale."
<a name="line-18163"></a>  name: "scale_backprop"
<a name="line-18164"></a>  type_attr: "T"
<a name="line-18165"></a>}
<a name="line-18166"></a>output_arg {
<a name="line-18167"></a>  description: "A 1D Tensor for the gradient with respect to offset."
<a name="line-18168"></a>  name: "offset_backprop"
<a name="line-18169"></a>  type_attr: "T"
<a name="line-18170"></a>}
<a name="line-18171"></a>output_arg {
<a name="line-18172"></a>  description: "Unused placeholder to match the mean input in FusedBatchNorm."
<a name="line-18173"></a>  name: "reserve_space_3"
<a name="line-18174"></a>  type_attr: "T"
<a name="line-18175"></a>}
<a name="line-18176"></a>output_arg {
<a name="line-18177"></a>  description: "Unused placeholder to match the variance input\nin FusedBatchNorm."
<a name="line-18178"></a>  name: "reserve_space_4"
<a name="line-18179"></a>  type_attr: "T"
<a name="line-18180"></a>}
<a name="line-18181"></a>-}</span>
<a name="line-18182"></a>
<a name="line-18183"></a><a name="paddingFIFOQueue"></a><span class='hs-comment'>-- | A queue that produces elements in first-in first-out order.</span>
<a name="line-18184"></a><span class='hs-comment'>--</span>
<a name="line-18185"></a><span class='hs-comment'>-- Variable-size shapes are allowed by setting the corresponding shape dimensions</span>
<a name="line-18186"></a><span class='hs-comment'>-- to 0 in the shape attr.  In this case DequeueMany will pad up to the maximum</span>
<a name="line-18187"></a><span class='hs-comment'>-- size of any given element in the minibatch.  See below for details.</span>
<a name="line-18188"></a><span class='hs-definition'>paddingFIFOQueue</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __handle__: The handle to the queue.</span>
<a name="line-18189"></a><span class='hs-definition'>paddingFIFOQueue</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18190"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"PaddingFIFOQueue"</span><span class='hs-layout'>)</span>
<a name="line-18191"></a>        
<a name="line-18192"></a><span class='hs-comment'>{-
<a name="line-18193"></a>attr {
<a name="line-18194"></a>  description: "The type of each component in a value."
<a name="line-18195"></a>  has_minimum: true
<a name="line-18196"></a>  minimum: 1
<a name="line-18197"></a>  name: "component_types"
<a name="line-18198"></a>  type: "list(type)"
<a name="line-18199"></a>}
<a name="line-18200"></a>attr {
<a name="line-18201"></a>  default_value { list { } }
<a name="line-18202"></a>  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1.  In this case, the inputs\' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time."
<a name="line-18203"></a>  has_minimum: true
<a name="line-18204"></a>  name: "shapes"
<a name="line-18205"></a>  type: "list(shape)"
<a name="line-18206"></a>}
<a name="line-18207"></a>attr {
<a name="line-18208"></a>  default_value { i: -1 }
<a name="line-18209"></a>  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
<a name="line-18210"></a>  name: "capacity"
<a name="line-18211"></a>  type: "int"
<a name="line-18212"></a>}
<a name="line-18213"></a>attr {
<a name="line-18214"></a>  default_value { s: "" }
<a name="line-18215"></a>  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
<a name="line-18216"></a>  name: "container"
<a name="line-18217"></a>  type: "string"
<a name="line-18218"></a>}
<a name="line-18219"></a>attr {
<a name="line-18220"></a>  default_value { s: "" }
<a name="line-18221"></a>  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
<a name="line-18222"></a>  name: "shared_name"
<a name="line-18223"></a>  type: "string"
<a name="line-18224"></a>}
<a name="line-18225"></a>output_arg {
<a name="line-18226"></a>  description: "The handle to the queue."
<a name="line-18227"></a>  is_ref: true
<a name="line-18228"></a>  name: "handle"
<a name="line-18229"></a>  type: DT_STRING
<a name="line-18230"></a>}
<a name="line-18231"></a>-}</span>
<a name="line-18232"></a>
<a name="line-18233"></a><a name="matrixInverse"></a><span class='hs-comment'>-- | Computes the inverse of one or more square invertible matrices or their</span>
<a name="line-18234"></a><span class='hs-comment'>--</span>
<a name="line-18235"></a><span class='hs-comment'>-- adjoints (conjugate transposes).</span>
<a name="line-18236"></a><span class='hs-comment'>-- </span>
<a name="line-18237"></a><span class='hs-comment'>-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions</span>
<a name="line-18238"></a><span class='hs-comment'>-- form square matrices. The output is a tensor of the same shape as the input</span>
<a name="line-18239"></a><span class='hs-comment'>-- containing the inverse for all input submatrices `[..., :, :]`.</span>
<a name="line-18240"></a><span class='hs-comment'>-- </span>
<a name="line-18241"></a><span class='hs-comment'>-- The op uses LU decomposition with partial pivoting to compute the inverses.</span>
<a name="line-18242"></a><span class='hs-comment'>-- </span>
<a name="line-18243"></a><span class='hs-comment'>-- If a matrix is not invertible there is no guarantee what the op does. It</span>
<a name="line-18244"></a><span class='hs-comment'>-- may detect the condition and raise an exception or it may simply return a</span>
<a name="line-18245"></a><span class='hs-comment'>-- garbage result.</span>
<a name="line-18246"></a><span class='hs-definition'>matrixInverse</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18247"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Shape is `[..., M, M]`.</span>
<a name="line-18248"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Shape is `[..., M, M]`.</span>
<a name="line-18249"></a>                 <span class='hs-comment'>-- </span>
<a name="line-18250"></a>                 <span class='hs-comment'>-- @compatibility(numpy)</span>
<a name="line-18251"></a>                 <span class='hs-comment'>-- Equivalent to np.linalg.inv</span>
<a name="line-18252"></a>                 <span class='hs-comment'>-- @end_compatibility</span>
<a name="line-18253"></a><span class='hs-definition'>matrixInverse</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18254"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatrixInverse"</span>
<a name="line-18255"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-18256"></a>        <span class='hs-varid'>input</span>
<a name="line-18257"></a><span class='hs-comment'>{-
<a name="line-18258"></a>attr { default_value { b: false } name: "adjoint" type: "bool" }
<a name="line-18259"></a>attr {
<a name="line-18260"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-18261"></a>  name: "T"
<a name="line-18262"></a>  type: "type"
<a name="line-18263"></a>}
<a name="line-18264"></a>input_arg {
<a name="line-18265"></a>  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
<a name="line-18266"></a>}
<a name="line-18267"></a>output_arg {
<a name="line-18268"></a>  description: "Shape is `[..., M, M]`.\n\n@compatibility(numpy)\nEquivalent to np.linalg.inv\n@end_compatibility"
<a name="line-18269"></a>  name: "output"
<a name="line-18270"></a>  type_attr: "T"
<a name="line-18271"></a>}
<a name="line-18272"></a>-}</span>
<a name="line-18273"></a>
<a name="line-18274"></a><a name="audioSummaryV2"></a><span class='hs-comment'>-- | Outputs a `Summary` protocol buffer with audio.</span>
<a name="line-18275"></a><span class='hs-comment'>--</span>
<a name="line-18276"></a><span class='hs-comment'>-- The summary has up to `max_outputs` summary values containing audio. The</span>
<a name="line-18277"></a><span class='hs-comment'>-- audio is built from `tensor` which must be 3-D with shape `[batch_size,</span>
<a name="line-18278"></a><span class='hs-comment'>-- frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are</span>
<a name="line-18279"></a><span class='hs-comment'>-- assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.</span>
<a name="line-18280"></a><span class='hs-comment'>-- </span>
<a name="line-18281"></a><span class='hs-comment'>-- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to</span>
<a name="line-18282"></a><span class='hs-comment'>-- build the `tag` of the summary values:</span>
<a name="line-18283"></a><span class='hs-comment'>-- </span>
<a name="line-18284"></a><span class='hs-comment'>-- *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.</span>
<a name="line-18285"></a><span class='hs-comment'>-- *  If `max_outputs` is greater than 1, the summary value tags are</span>
<a name="line-18286"></a><span class='hs-comment'>--    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.</span>
<a name="line-18287"></a><span class='hs-definition'>audioSummaryV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.</span>
<a name="line-18288"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __tensor__: 2-D of shape `[batch_size, frames]`.</span>
<a name="line-18289"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __sample_rate__: The sample rate of the signal in hertz.</span>
<a name="line-18290"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.</span>
<a name="line-18291"></a><span class='hs-definition'>audioSummaryV2</span> <span class='hs-varid'>tag</span> <span class='hs-varid'>tensor</span> <span class='hs-varid'>sample_rate</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18292"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AudioSummaryV2"</span><span class='hs-layout'>)</span>
<a name="line-18293"></a>        <span class='hs-varid'>tag</span> <span class='hs-varid'>tensor</span> <span class='hs-varid'>sample_rate</span>
<a name="line-18294"></a><span class='hs-comment'>{-
<a name="line-18295"></a>attr {
<a name="line-18296"></a>  default_value { i: 3 }
<a name="line-18297"></a>  description: "Max number of batch elements to generate audio for."
<a name="line-18298"></a>  has_minimum: true
<a name="line-18299"></a>  minimum: 1
<a name="line-18300"></a>  name: "max_outputs"
<a name="line-18301"></a>  type: "int"
<a name="line-18302"></a>}
<a name="line-18303"></a>input_arg {
<a name="line-18304"></a>  description: "Scalar. Used to build the `tag` attribute of the summary values."
<a name="line-18305"></a>  name: "tag"
<a name="line-18306"></a>  type: DT_STRING
<a name="line-18307"></a>}
<a name="line-18308"></a>input_arg {
<a name="line-18309"></a>  description: "2-D of shape `[batch_size, frames]`."
<a name="line-18310"></a>  name: "tensor"
<a name="line-18311"></a>  type: DT_FLOAT
<a name="line-18312"></a>}
<a name="line-18313"></a>input_arg {
<a name="line-18314"></a>  description: "The sample rate of the signal in hertz."
<a name="line-18315"></a>  name: "sample_rate"
<a name="line-18316"></a>  type: DT_FLOAT
<a name="line-18317"></a>}
<a name="line-18318"></a>output_arg {
<a name="line-18319"></a>  description: "Scalar. Serialized `Summary` protocol buffer."
<a name="line-18320"></a>  name: "summary"
<a name="line-18321"></a>  type: DT_STRING
<a name="line-18322"></a>}
<a name="line-18323"></a>-}</span>
<a name="line-18324"></a>
<a name="line-18325"></a><a name="matrixDeterminant"></a><span class='hs-comment'>-- | Computes the determinant of one ore more square matrices.</span>
<a name="line-18326"></a><span class='hs-comment'>--</span>
<a name="line-18327"></a><span class='hs-comment'>-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions</span>
<a name="line-18328"></a><span class='hs-comment'>-- form square matrices. The output is a tensor containing the determinants</span>
<a name="line-18329"></a><span class='hs-comment'>-- for all input submatrices `[..., :, :]`.</span>
<a name="line-18330"></a><span class='hs-definition'>matrixDeterminant</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18331"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Shape is `[..., M, M]`.</span>
<a name="line-18332"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Shape is `[...]`.</span>
<a name="line-18333"></a><span class='hs-definition'>matrixDeterminant</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18334"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatrixDeterminant"</span>
<a name="line-18335"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-18336"></a>        <span class='hs-varid'>input</span>
<a name="line-18337"></a><span class='hs-comment'>{-
<a name="line-18338"></a>attr {
<a name="line-18339"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-18340"></a>  name: "T"
<a name="line-18341"></a>  type: "type"
<a name="line-18342"></a>}
<a name="line-18343"></a>input_arg {
<a name="line-18344"></a>  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
<a name="line-18345"></a>}
<a name="line-18346"></a>output_arg {
<a name="line-18347"></a>  description: "Shape is `[...]`." name: "output" type_attr: "T"
<a name="line-18348"></a>}
<a name="line-18349"></a>-}</span>
<a name="line-18350"></a>
<a name="line-18351"></a><span class='hs-comment'>-- | Writes contents to the file at input filename. Creates file if not existing.</span>
<a name="line-18352"></a>
<a name="line-18353"></a><a name="writeFile"></a><span class='hs-definition'>writeFile</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __filename__: scalar. The name of the file to which we write the contents.</span>
<a name="line-18354"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __contents__: scalar. The content to be written to the output file.</span>
<a name="line-18355"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>ControlNode</span>
<a name="line-18356"></a><span class='hs-definition'>writeFile</span> <span class='hs-varid'>filename</span> <span class='hs-varid'>contents</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18357"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"WriteFile"</span><span class='hs-layout'>)</span>
<a name="line-18358"></a>        <span class='hs-varid'>filename</span> <span class='hs-varid'>contents</span>
<a name="line-18359"></a><span class='hs-comment'>{-
<a name="line-18360"></a>input_arg {
<a name="line-18361"></a>  description: "scalar. The name of the file to which we write the contents."
<a name="line-18362"></a>  name: "filename"
<a name="line-18363"></a>  type: DT_STRING
<a name="line-18364"></a>}
<a name="line-18365"></a>input_arg {
<a name="line-18366"></a>  description: "scalar. The content to be written to the output file."
<a name="line-18367"></a>  name: "contents"
<a name="line-18368"></a>  type: DT_STRING
<a name="line-18369"></a>}
<a name="line-18370"></a>-}</span>
<a name="line-18371"></a>
<a name="line-18372"></a><span class='hs-comment'>-- | Concatenates quantized tensors along one dimension.</span>
<a name="line-18373"></a>
<a name="line-18374"></a><a name="quantizedConcat"></a><span class='hs-definition'>quantizedConcat</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18375"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __concat_dim__: 0-D.  The dimension along which to concatenate.  Must be in the</span>
<a name="line-18376"></a>                                            <span class='hs-comment'>-- range [0, rank(values)).</span>
<a name="line-18377"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match,</span>
<a name="line-18378"></a>                                    <span class='hs-comment'>-- and their sizes must match in all dimensions except `concat_dim`.</span>
<a name="line-18379"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __input_mins__: The minimum scalar values for each of the input tensors.</span>
<a name="line-18380"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __input_maxes__: The maximum scalar values for each of the input tensors.</span>
<a name="line-18381"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-18382"></a>                   <span class='hs-comment'>-- ^ (__output__, __output_min__, __output_max__)</span>
<a name="line-18383"></a>                   <span class='hs-comment'>--</span>
<a name="line-18384"></a>                   <span class='hs-comment'>-- * __output__: A `Tensor` with the concatenation of values stacked along the</span>
<a name="line-18385"></a>                   <span class='hs-comment'>-- `concat_dim` dimension.  This tensor's shape matches that of `values` except</span>
<a name="line-18386"></a>                   <span class='hs-comment'>-- in `concat_dim` where it has the sum of the sizes.</span>
<a name="line-18387"></a>                   <span class='hs-comment'>--</span>
<a name="line-18388"></a>                   <span class='hs-comment'>-- * __output_min__: The float value that the minimum quantized output value represents.</span>
<a name="line-18389"></a>                   <span class='hs-comment'>--</span>
<a name="line-18390"></a>                   <span class='hs-comment'>-- * __output_max__: The float value that the maximum quantized output value represents.</span>
<a name="line-18391"></a><span class='hs-definition'>quantizedConcat</span> <span class='hs-varid'>concat_dim</span> <span class='hs-varid'>values</span> <span class='hs-varid'>input_mins</span>
<a name="line-18392"></a>                <span class='hs-varid'>input_maxes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"values"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>values</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-18393"></a>                                                    <span class='hs-layout'>(</span><span class='hs-str'>"input_mins"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>input_mins</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-18394"></a>                                                    <span class='hs-layout'>(</span><span class='hs-str'>"input_maxes"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>input_maxes</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18395"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedConcat"</span>
<a name="line-18396"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-18397"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-18398"></a>        <span class='hs-varid'>concat_dim</span> <span class='hs-varid'>values</span> <span class='hs-varid'>input_mins</span> <span class='hs-varid'>input_maxes</span>
<a name="line-18399"></a>  <span class='hs-keyword'>where</span>
<a name="line-18400"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>values</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-18401"></a><span class='hs-comment'>{-
<a name="line-18402"></a>attr { has_minimum: true minimum: 2 name: "N" type: "int" }
<a name="line-18403"></a>attr { name: "T" type: "type" }
<a name="line-18404"></a>input_arg {
<a name="line-18405"></a>  description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [0, rank(values))."
<a name="line-18406"></a>  name: "concat_dim"
<a name="line-18407"></a>  type: DT_INT32
<a name="line-18408"></a>}
<a name="line-18409"></a>input_arg {
<a name="line-18410"></a>  description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
<a name="line-18411"></a>  name: "values"
<a name="line-18412"></a>  number_attr: "N"
<a name="line-18413"></a>  type_attr: "T"
<a name="line-18414"></a>}
<a name="line-18415"></a>input_arg {
<a name="line-18416"></a>  description: "The minimum scalar values for each of the input tensors."
<a name="line-18417"></a>  name: "input_mins"
<a name="line-18418"></a>  number_attr: "N"
<a name="line-18419"></a>  type: DT_FLOAT
<a name="line-18420"></a>}
<a name="line-18421"></a>input_arg {
<a name="line-18422"></a>  description: "The maximum scalar values for each of the input tensors."
<a name="line-18423"></a>  name: "input_maxes"
<a name="line-18424"></a>  number_attr: "N"
<a name="line-18425"></a>  type: DT_FLOAT
<a name="line-18426"></a>}
<a name="line-18427"></a>output_arg {
<a name="line-18428"></a>  description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
<a name="line-18429"></a>  name: "output"
<a name="line-18430"></a>  type_attr: "T"
<a name="line-18431"></a>}
<a name="line-18432"></a>output_arg {
<a name="line-18433"></a>  description: "The float value that the minimum quantized output value represents."
<a name="line-18434"></a>  name: "output_min"
<a name="line-18435"></a>  type: DT_FLOAT
<a name="line-18436"></a>}
<a name="line-18437"></a>output_arg {
<a name="line-18438"></a>  description: "The float value that the maximum quantized output value represents."
<a name="line-18439"></a>  name: "output_max"
<a name="line-18440"></a>  type: DT_FLOAT
<a name="line-18441"></a>}
<a name="line-18442"></a>-}</span>
<a name="line-18443"></a>
<a name="line-18444"></a><span class='hs-comment'>-- | Creates a handle to a Variable resource.</span>
<a name="line-18445"></a>
<a name="line-18446"></a><a name="varHandleOp"></a><span class='hs-definition'>varHandleOp</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18447"></a>               <span class='hs-conid'>Shape</span> <span class='hs-comment'>-- ^ __shape__: The (possibly partially specified) shape of this variable.</span>
<a name="line-18448"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ResourceHandle</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __resource__</span>
<a name="line-18449"></a><span class='hs-definition'>varHandleOp</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18450"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"VarHandleOp"</span>
<a name="line-18451"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-18452"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"shape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>shape</span><span class='hs-layout'>)</span>
<a name="line-18453"></a>        
<a name="line-18454"></a><span class='hs-comment'>{-
<a name="line-18455"></a>attr {
<a name="line-18456"></a>  default_value { s: "" }
<a name="line-18457"></a>  description: "the container this variable is placed in."
<a name="line-18458"></a>  name: "container"
<a name="line-18459"></a>  type: "string"
<a name="line-18460"></a>}
<a name="line-18461"></a>attr {
<a name="line-18462"></a>  default_value { s: "" }
<a name="line-18463"></a>  description: "the name by which this variable is referred to."
<a name="line-18464"></a>  name: "shared_name"
<a name="line-18465"></a>  type: "string"
<a name="line-18466"></a>}
<a name="line-18467"></a>attr {
<a name="line-18468"></a>  description: "the type of this variable. Must agree with the dtypes\nof all ops using this variable."
<a name="line-18469"></a>  name: "dtype"
<a name="line-18470"></a>  type: "type"
<a name="line-18471"></a>}
<a name="line-18472"></a>attr {
<a name="line-18473"></a>  description: "The (possibly partially specified) shape of this variable."
<a name="line-18474"></a>  name: "shape"
<a name="line-18475"></a>  type: "shape"
<a name="line-18476"></a>}
<a name="line-18477"></a>output_arg { name: "resource" type: DT_RESOURCE }
<a name="line-18478"></a>-}</span>
<a name="line-18479"></a>
<a name="line-18480"></a><a name="stridedSliceAssign"></a><span class='hs-comment'>-- | Assign `value` to the sliced l-value reference of `ref`.</span>
<a name="line-18481"></a><span class='hs-comment'>--</span>
<a name="line-18482"></a><span class='hs-comment'>-- The values of `value` are assigned to the positions in the variable</span>
<a name="line-18483"></a><span class='hs-comment'>-- `ref` that are selected by the slice parameters. The slice parameters</span>
<a name="line-18484"></a><span class='hs-comment'>-- `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`.</span>
<a name="line-18485"></a><span class='hs-comment'>-- </span>
<a name="line-18486"></a><span class='hs-comment'>-- NOTE this op currently does not support broadcasting and so `value`'s</span>
<a name="line-18487"></a><span class='hs-comment'>-- shape must be exactly the shape produced by the slice of `ref`.</span>
<a name="line-18488"></a><span class='hs-definition'>stridedSliceAssign</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-varid'>index</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-18489"></a>                                                    <span class='hs-conid'>TensorType</span> <span class='hs-varid'>index</span><span class='hs-layout'>,</span>
<a name="line-18490"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-18491"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>index</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18492"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__</span>
<a name="line-18493"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __begin__</span>
<a name="line-18494"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __end__</span>
<a name="line-18495"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __strides__</span>
<a name="line-18496"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__</span>
<a name="line-18497"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__</span>
<a name="line-18498"></a><span class='hs-definition'>stridedSliceAssign</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>begin</span> <span class='hs-varid'>end</span> <span class='hs-varid'>strides</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18499"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StridedSliceAssign"</span>
<a name="line-18500"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-18501"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Index"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>index</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-18502"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>begin</span> <span class='hs-varid'>end</span> <span class='hs-varid'>strides</span> <span class='hs-varid'>value</span>
<a name="line-18503"></a><span class='hs-comment'>{-
<a name="line-18504"></a>attr { name: "T" type: "type" }
<a name="line-18505"></a>attr {
<a name="line-18506"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-18507"></a>  name: "Index"
<a name="line-18508"></a>  type: "type"
<a name="line-18509"></a>}
<a name="line-18510"></a>attr { default_value { i: 0 } name: "begin_mask" type: "int" }
<a name="line-18511"></a>attr { default_value { i: 0 } name: "end_mask" type: "int" }
<a name="line-18512"></a>attr { default_value { i: 0 } name: "ellipsis_mask" type: "int" }
<a name="line-18513"></a>attr { default_value { i: 0 } name: "new_axis_mask" type: "int" }
<a name="line-18514"></a>attr {
<a name="line-18515"></a>  default_value { i: 0 } name: "shrink_axis_mask" type: "int"
<a name="line-18516"></a>}
<a name="line-18517"></a>input_arg { is_ref: true name: "ref" type_attr: "T" }
<a name="line-18518"></a>input_arg { name: "begin" type_attr: "Index" }
<a name="line-18519"></a>input_arg { name: "end" type_attr: "Index" }
<a name="line-18520"></a>input_arg { name: "strides" type_attr: "Index" }
<a name="line-18521"></a>input_arg { name: "value" type_attr: "T" }
<a name="line-18522"></a>output_arg { is_ref: true name: "output_ref" type_attr: "T" }
<a name="line-18523"></a>-}</span>
<a name="line-18524"></a>
<a name="line-18525"></a><span class='hs-comment'>-- | Checks whether a resource handle-based variable has been initialized.</span>
<a name="line-18526"></a>
<a name="line-18527"></a><a name="varIsInitializedOp"></a><span class='hs-definition'>varIsInitializedOp</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>ResourceHandle</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __resource__: the input resource handle.</span>
<a name="line-18528"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __is_initialized__: a scalar boolean which is true if the variable has been</span>
<a name="line-18529"></a>                      <span class='hs-comment'>-- initialized.</span>
<a name="line-18530"></a><span class='hs-definition'>varIsInitializedOp</span> <span class='hs-varid'>resource</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18531"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"VarIsInitializedOp"</span><span class='hs-layout'>)</span>
<a name="line-18532"></a>        <span class='hs-varid'>resource</span>
<a name="line-18533"></a><span class='hs-comment'>{-
<a name="line-18534"></a>input_arg {
<a name="line-18535"></a>  description: "the input resource handle."
<a name="line-18536"></a>  name: "resource"
<a name="line-18537"></a>  type: DT_RESOURCE
<a name="line-18538"></a>}
<a name="line-18539"></a>output_arg {
<a name="line-18540"></a>  description: "a scalar boolean which is true if the variable has been\ninitialized."
<a name="line-18541"></a>  name: "is_initialized"
<a name="line-18542"></a>  type: DT_BOOL
<a name="line-18543"></a>}
<a name="line-18544"></a>-}</span>
<a name="line-18545"></a>
<a name="line-18546"></a><a name="sparseApplyRMSProp"></a><span class='hs-comment'>-- | Update '*var' according to the RMSProp algorithm.</span>
<a name="line-18547"></a><span class='hs-comment'>--</span>
<a name="line-18548"></a><span class='hs-comment'>-- Note that in dense implementation of this algorithm, ms and mom will</span>
<a name="line-18549"></a><span class='hs-comment'>-- update even if the grad is zero, but in this sparse implementation, ms</span>
<a name="line-18550"></a><span class='hs-comment'>-- and mom will not update in iterations during which the grad is zero.</span>
<a name="line-18551"></a><span class='hs-comment'>-- </span>
<a name="line-18552"></a><span class='hs-comment'>-- mean_square = decay * mean_square + (1-decay) * gradient ** 2</span>
<a name="line-18553"></a><span class='hs-comment'>-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)</span>
<a name="line-18554"></a><span class='hs-comment'>-- </span>
<a name="line-18555"></a><span class='hs-comment'>-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad</span>
<a name="line-18556"></a><span class='hs-comment'>-- mom &lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)</span>
<a name="line-18557"></a><span class='hs-comment'>-- var &lt;- var - mom</span>
<a name="line-18558"></a><span class='hs-definition'>sparseApplyRMSProp</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-18559"></a>                                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-18560"></a>                                                                     <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-18561"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-18562"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-18563"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-18564"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-18565"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-18566"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-18567"></a>                                                                     <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-18568"></a>                                                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-18569"></a>                                                             <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-18570"></a>                                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-18571"></a>                                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18572"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-18573"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ms__: Should be from a Variable().</span>
<a name="line-18574"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __mom__: Should be from a Variable().</span>
<a name="line-18575"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-18576"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rho__: Decay rate. Must be a scalar.</span>
<a name="line-18577"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __momentum__</span>
<a name="line-18578"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __epsilon__: Ridge term. Must be a scalar.</span>
<a name="line-18579"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-18580"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v9</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.</span>
<a name="line-18581"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-18582"></a><span class='hs-definition'>sparseApplyRMSProp</span> <span class='hs-varid'>var</span> <span class='hs-varid'>ms</span> <span class='hs-varid'>mom</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>momentum</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span>
<a name="line-18583"></a>                   <span class='hs-varid'>indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18584"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseApplyRMSProp"</span>
<a name="line-18585"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-18586"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-18587"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>ms</span> <span class='hs-varid'>mom</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>rho</span> <span class='hs-varid'>momentum</span> <span class='hs-varid'>epsilon</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span>
<a name="line-18588"></a><span class='hs-comment'>{-
<a name="line-18589"></a>attr {
<a name="line-18590"></a>  allowed_values {
<a name="line-18591"></a>    list {
<a name="line-18592"></a>      type: DT_FLOAT
<a name="line-18593"></a>      type: DT_DOUBLE
<a name="line-18594"></a>      type: DT_INT64
<a name="line-18595"></a>      type: DT_INT32
<a name="line-18596"></a>      type: DT_UINT8
<a name="line-18597"></a>      type: DT_UINT16
<a name="line-18598"></a>      type: DT_INT16
<a name="line-18599"></a>      type: DT_INT8
<a name="line-18600"></a>      type: DT_COMPLEX64
<a name="line-18601"></a>      type: DT_COMPLEX128
<a name="line-18602"></a>      type: DT_QINT8
<a name="line-18603"></a>      type: DT_QUINT8
<a name="line-18604"></a>      type: DT_QINT32
<a name="line-18605"></a>      type: DT_HALF
<a name="line-18606"></a>    }
<a name="line-18607"></a>  }
<a name="line-18608"></a>  name: "T"
<a name="line-18609"></a>  type: "type"
<a name="line-18610"></a>}
<a name="line-18611"></a>attr {
<a name="line-18612"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-18613"></a>  name: "Tindices"
<a name="line-18614"></a>  type: "type"
<a name="line-18615"></a>}
<a name="line-18616"></a>attr {
<a name="line-18617"></a>  default_value { b: false }
<a name="line-18618"></a>  description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-18619"></a>  name: "use_locking"
<a name="line-18620"></a>  type: "bool"
<a name="line-18621"></a>}
<a name="line-18622"></a>input_arg {
<a name="line-18623"></a>  description: "Should be from a Variable()."
<a name="line-18624"></a>  is_ref: true
<a name="line-18625"></a>  name: "var"
<a name="line-18626"></a>  type_attr: "T"
<a name="line-18627"></a>}
<a name="line-18628"></a>input_arg {
<a name="line-18629"></a>  description: "Should be from a Variable()."
<a name="line-18630"></a>  is_ref: true
<a name="line-18631"></a>  name: "ms"
<a name="line-18632"></a>  type_attr: "T"
<a name="line-18633"></a>}
<a name="line-18634"></a>input_arg {
<a name="line-18635"></a>  description: "Should be from a Variable()."
<a name="line-18636"></a>  is_ref: true
<a name="line-18637"></a>  name: "mom"
<a name="line-18638"></a>  type_attr: "T"
<a name="line-18639"></a>}
<a name="line-18640"></a>input_arg {
<a name="line-18641"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-18642"></a>  name: "lr"
<a name="line-18643"></a>  type_attr: "T"
<a name="line-18644"></a>}
<a name="line-18645"></a>input_arg {
<a name="line-18646"></a>  description: "Decay rate. Must be a scalar."
<a name="line-18647"></a>  name: "rho"
<a name="line-18648"></a>  type_attr: "T"
<a name="line-18649"></a>}
<a name="line-18650"></a>input_arg { name: "momentum" type_attr: "T" }
<a name="line-18651"></a>input_arg {
<a name="line-18652"></a>  description: "Ridge term. Must be a scalar."
<a name="line-18653"></a>  name: "epsilon"
<a name="line-18654"></a>  type_attr: "T"
<a name="line-18655"></a>}
<a name="line-18656"></a>input_arg {
<a name="line-18657"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-18658"></a>}
<a name="line-18659"></a>input_arg {
<a name="line-18660"></a>  description: "A vector of indices into the first dimension of var, ms and mom."
<a name="line-18661"></a>  name: "indices"
<a name="line-18662"></a>  type_attr: "Tindices"
<a name="line-18663"></a>}
<a name="line-18664"></a>output_arg {
<a name="line-18665"></a>  description: "Same as \"var\"."
<a name="line-18666"></a>  is_ref: true
<a name="line-18667"></a>  name: "out"
<a name="line-18668"></a>  type_attr: "T"
<a name="line-18669"></a>}
<a name="line-18670"></a>-}</span>
<a name="line-18671"></a>
<a name="line-18672"></a><span class='hs-comment'>-- | </span>
<a name="line-18673"></a>
<a name="line-18674"></a><a name="batchCholesky"></a><span class='hs-definition'>batchCholesky</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18675"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-18676"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-18677"></a><span class='hs-definition'>batchCholesky</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18678"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchCholesky"</span>
<a name="line-18679"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-18680"></a>        <span class='hs-varid'>input</span>
<a name="line-18681"></a><span class='hs-comment'>{-
<a name="line-18682"></a>attr {
<a name="line-18683"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-18684"></a>  name: "T"
<a name="line-18685"></a>  type: "type"
<a name="line-18686"></a>}
<a name="line-18687"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-18688"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-18689"></a>-}</span>
<a name="line-18690"></a>
<a name="line-18691"></a><span class='hs-comment'>-- | </span>
<a name="line-18692"></a>
<a name="line-18693"></a><a name="tensorArrayGather"></a><span class='hs-definition'>tensorArrayGather</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18694"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-18695"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __indices__</span>
<a name="line-18696"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__</span>
<a name="line-18697"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __value__</span>
<a name="line-18698"></a><span class='hs-definition'>tensorArrayGather</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18699"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayGather"</span>
<a name="line-18700"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-18701"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>flow_in</span>
<a name="line-18702"></a><span class='hs-comment'>{-
<a name="line-18703"></a>attr { name: "dtype" type: "type" }
<a name="line-18704"></a>attr {
<a name="line-18705"></a>  default_value { shape { unknown_rank: true } }
<a name="line-18706"></a>  name: "element_shape"
<a name="line-18707"></a>  type: "shape"
<a name="line-18708"></a>}
<a name="line-18709"></a>input_arg { is_ref: true name: "handle" type: DT_STRING }
<a name="line-18710"></a>input_arg { name: "indices" type: DT_INT32 }
<a name="line-18711"></a>input_arg { name: "flow_in" type: DT_FLOAT }
<a name="line-18712"></a>output_arg { name: "value" type_attr: "dtype" }
<a name="line-18713"></a>-}</span>
<a name="line-18714"></a>
<a name="line-18715"></a><a name="readerRestoreState"></a><span class='hs-comment'>-- | Restore a reader to a previously saved state.</span>
<a name="line-18716"></a><span class='hs-comment'>--</span>
<a name="line-18717"></a><span class='hs-comment'>-- Not all Readers support being restored, so this can produce an</span>
<a name="line-18718"></a><span class='hs-comment'>-- Unimplemented error.</span>
<a name="line-18719"></a><span class='hs-definition'>readerRestoreState</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __reader_handle__: Handle to a Reader.</span>
<a name="line-18720"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __state__: Result of a ReaderSerializeState of a Reader with type</span>
<a name="line-18721"></a>                                                              <span class='hs-comment'>-- matching reader_handle.</span>
<a name="line-18722"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-18723"></a><span class='hs-definition'>readerRestoreState</span> <span class='hs-varid'>reader_handle</span> <span class='hs-varid'>state</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18724"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReaderRestoreState"</span><span class='hs-layout'>)</span>
<a name="line-18725"></a>        <span class='hs-varid'>reader_handle</span> <span class='hs-varid'>state</span>
<a name="line-18726"></a><span class='hs-comment'>{-
<a name="line-18727"></a>input_arg {
<a name="line-18728"></a>  description: "Handle to a Reader."
<a name="line-18729"></a>  is_ref: true
<a name="line-18730"></a>  name: "reader_handle"
<a name="line-18731"></a>  type: DT_STRING
<a name="line-18732"></a>}
<a name="line-18733"></a>input_arg {
<a name="line-18734"></a>  description: "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle."
<a name="line-18735"></a>  name: "state"
<a name="line-18736"></a>  type: DT_STRING
<a name="line-18737"></a>}
<a name="line-18738"></a>-}</span>
<a name="line-18739"></a>
<a name="line-18740"></a><a name="sqrtGrad"></a><span class='hs-comment'>-- | Computes the gradient for the sqrt of `x` wrt its input.</span>
<a name="line-18741"></a><span class='hs-comment'>--</span>
<a name="line-18742"></a><span class='hs-comment'>-- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`</span>
<a name="line-18743"></a><span class='hs-comment'>-- is the corresponding input gradient.</span>
<a name="line-18744"></a><span class='hs-definition'>sqrtGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-18745"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-18746"></a>                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-18747"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18748"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-18749"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-18750"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-18751"></a><span class='hs-definition'>sqrtGrad</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18752"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SqrtGrad"</span>
<a name="line-18753"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-18754"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-18755"></a><span class='hs-comment'>{-
<a name="line-18756"></a>attr {
<a name="line-18757"></a>  allowed_values {
<a name="line-18758"></a>    list {
<a name="line-18759"></a>      type: DT_HALF
<a name="line-18760"></a>      type: DT_FLOAT
<a name="line-18761"></a>      type: DT_DOUBLE
<a name="line-18762"></a>      type: DT_COMPLEX64
<a name="line-18763"></a>      type: DT_COMPLEX128
<a name="line-18764"></a>    }
<a name="line-18765"></a>  }
<a name="line-18766"></a>  name: "T"
<a name="line-18767"></a>  type: "type"
<a name="line-18768"></a>}
<a name="line-18769"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-18770"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-18771"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-18772"></a>-}</span>
<a name="line-18773"></a>
<a name="line-18774"></a><span class='hs-comment'>-- | Splits a tensor into `num_split` tensors along one dimension.</span>
<a name="line-18775"></a>
<a name="line-18776"></a><a name="split"></a><span class='hs-definition'>split</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18777"></a>         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_split__: The number of ways to split.  Must evenly divide</span>
<a name="line-18778"></a>                        <span class='hs-comment'>-- `value.shape[split_dim]`.</span>
<a name="line-18779"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range</span>
<a name="line-18780"></a>                                     <span class='hs-comment'>-- `[0, rank(value))`.</span>
<a name="line-18781"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: The tensor to split.</span>
<a name="line-18782"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __output__: They are identically shaped tensors, whose shape matches that of `value`</span>
<a name="line-18783"></a>         <span class='hs-comment'>-- except along `split_dim`, where their sizes are</span>
<a name="line-18784"></a>         <span class='hs-comment'>-- `values.shape[split_dim] / num_split`.</span>
<a name="line-18785"></a><span class='hs-definition'>split</span> <span class='hs-varid'>num_split</span> <span class='hs-varid'>split_dim</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18786"></a>    <span class='hs-varid'>buildListOp</span> <span class='hs-keyglyph'>[</span><span class='hs-varid'>num_split</span><span class='hs-keyglyph'>]</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Split"</span>
<a name="line-18787"></a>                             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-18788"></a>                             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_split"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_split</span><span class='hs-layout'>)</span>
<a name="line-18789"></a>        <span class='hs-varid'>split_dim</span> <span class='hs-varid'>value</span>
<a name="line-18790"></a><span class='hs-comment'>{-
<a name="line-18791"></a>attr {
<a name="line-18792"></a>  description: "The number of ways to split.  Must evenly divide\n`value.shape[split_dim]`."
<a name="line-18793"></a>  has_minimum: true
<a name="line-18794"></a>  minimum: 1
<a name="line-18795"></a>  name: "num_split"
<a name="line-18796"></a>  type: "int"
<a name="line-18797"></a>}
<a name="line-18798"></a>attr { name: "T" type: "type" }
<a name="line-18799"></a>input_arg {
<a name="line-18800"></a>  description: "0-D.  The dimension along which to split.  Must be in the range\n`[0, rank(value))`."
<a name="line-18801"></a>  name: "split_dim"
<a name="line-18802"></a>  type: DT_INT32
<a name="line-18803"></a>}
<a name="line-18804"></a>input_arg {
<a name="line-18805"></a>  description: "The tensor to split." name: "value" type_attr: "T"
<a name="line-18806"></a>}
<a name="line-18807"></a>output_arg {
<a name="line-18808"></a>  description: "They are identically shaped tensors, whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`values.shape[split_dim] / num_split`."
<a name="line-18809"></a>  name: "output"
<a name="line-18810"></a>  number_attr: "num_split"
<a name="line-18811"></a>  type_attr: "T"
<a name="line-18812"></a>}
<a name="line-18813"></a>-}</span>
<a name="line-18814"></a>
<a name="line-18815"></a><span class='hs-comment'>-- | A Reader that outputs the lines of a file delimited by '\n'.</span>
<a name="line-18816"></a>
<a name="line-18817"></a><a name="textLineReader"></a><span class='hs-definition'>textLineReader</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __reader_handle__: The handle to reference the Reader.</span>
<a name="line-18818"></a><span class='hs-definition'>textLineReader</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18819"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TextLineReader"</span><span class='hs-layout'>)</span>
<a name="line-18820"></a>        
<a name="line-18821"></a><span class='hs-comment'>{-
<a name="line-18822"></a>attr {
<a name="line-18823"></a>  default_value { i: 0 }
<a name="line-18824"></a>  description: "Number of lines to skip from the beginning of every file."
<a name="line-18825"></a>  name: "skip_header_lines"
<a name="line-18826"></a>  type: "int"
<a name="line-18827"></a>}
<a name="line-18828"></a>attr {
<a name="line-18829"></a>  default_value { s: "" }
<a name="line-18830"></a>  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
<a name="line-18831"></a>  name: "container"
<a name="line-18832"></a>  type: "string"
<a name="line-18833"></a>}
<a name="line-18834"></a>attr {
<a name="line-18835"></a>  default_value { s: "" }
<a name="line-18836"></a>  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
<a name="line-18837"></a>  name: "shared_name"
<a name="line-18838"></a>  type: "string"
<a name="line-18839"></a>}
<a name="line-18840"></a>output_arg {
<a name="line-18841"></a>  description: "The handle to reference the Reader."
<a name="line-18842"></a>  is_ref: true
<a name="line-18843"></a>  name: "reader_handle"
<a name="line-18844"></a>  type: DT_STRING
<a name="line-18845"></a>}
<a name="line-18846"></a>-}</span>
<a name="line-18847"></a>
<a name="line-18848"></a><a name="matrixBandPart"></a><span class='hs-comment'>-- | Copy a tensor setting everything outside a central band in each innermost matrix</span>
<a name="line-18849"></a><span class='hs-comment'>--</span>
<a name="line-18850"></a><span class='hs-comment'>-- to zero.</span>
<a name="line-18851"></a><span class='hs-comment'>-- </span>
<a name="line-18852"></a><span class='hs-comment'>-- The `band` part is computed as follows:</span>
<a name="line-18853"></a><span class='hs-comment'>-- Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a</span>
<a name="line-18854"></a><span class='hs-comment'>-- tensor with the same shape where</span>
<a name="line-18855"></a><span class='hs-comment'>-- </span>
<a name="line-18856"></a><span class='hs-comment'>-- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.</span>
<a name="line-18857"></a><span class='hs-comment'>-- </span>
<a name="line-18858"></a><span class='hs-comment'>-- The indicator function</span>
<a name="line-18859"></a><span class='hs-comment'>-- </span>
<a name="line-18860"></a><span class='hs-comment'>-- `in_band(m, n) = (num_lower &lt; 0 || (m-n) &lt;= num_lower)) &amp;&amp;</span>
<a name="line-18861"></a><span class='hs-comment'>--                  (num_upper &lt; 0 || (n-m) &lt;= num_upper)`.</span>
<a name="line-18862"></a><span class='hs-comment'>-- </span>
<a name="line-18863"></a><span class='hs-comment'>-- For example:</span>
<a name="line-18864"></a><span class='hs-comment'>-- </span>
<a name="line-18865"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-18866"></a><span class='hs-comment'>-- # if 'input' is [[ 0,  1,  2, 3]</span>
<a name="line-18867"></a><span class='hs-comment'>--                  [-1,  0,  1, 2]</span>
<a name="line-18868"></a><span class='hs-comment'>--                  [-2, -1,  0, 1]</span>
<a name="line-18869"></a><span class='hs-comment'>--                  [-3, -2, -1, 0]],</span>
<a name="line-18870"></a><span class='hs-comment'>-- </span>
<a name="line-18871"></a><span class='hs-comment'>-- tf.matrix_band_part(input, 1, -1) ==&gt; [[ 0,  1,  2, 3]</span>
<a name="line-18872"></a><span class='hs-comment'>--                                        [-1,  0,  1, 2]</span>
<a name="line-18873"></a><span class='hs-comment'>--                                        [ 0, -1,  0, 1]</span>
<a name="line-18874"></a><span class='hs-comment'>--                                        [ 0,  0, -1, 0]],</span>
<a name="line-18875"></a><span class='hs-comment'>-- </span>
<a name="line-18876"></a><span class='hs-comment'>-- tf.matrix_band_part(input, 2, 1) ==&gt; [[ 0,  1,  0, 0]</span>
<a name="line-18877"></a><span class='hs-comment'>--                                       [-1,  0,  1, 0]</span>
<a name="line-18878"></a><span class='hs-comment'>--                                       [-2, -1,  0, 1]</span>
<a name="line-18879"></a><span class='hs-comment'>--                                       [ 0, -2, -1, 0]]</span>
<a name="line-18880"></a><span class='hs-comment'>-- ```</span>
<a name="line-18881"></a><span class='hs-comment'>-- </span>
<a name="line-18882"></a><span class='hs-comment'>-- Useful special cases:</span>
<a name="line-18883"></a><span class='hs-comment'>-- </span>
<a name="line-18884"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-18885"></a><span class='hs-comment'>--  tf.matrix_band_part(input, 0, -1) ==&gt; Upper triangular part.</span>
<a name="line-18886"></a><span class='hs-comment'>--  tf.matrix_band_part(input, -1, 0) ==&gt; Lower triangular part.</span>
<a name="line-18887"></a><span class='hs-comment'>--  tf.matrix_band_part(input, 0, 0) ==&gt; Diagonal.</span>
<a name="line-18888"></a><span class='hs-comment'>-- ```</span>
<a name="line-18889"></a><span class='hs-definition'>matrixBandPart</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-18890"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Rank `k` tensor.</span>
<a name="line-18891"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_lower__: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire</span>
<a name="line-18892"></a>                                              <span class='hs-comment'>-- lower triangle.</span>
<a name="line-18893"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_upper__: 0-D tensor. Number of superdiagonals to keep. If negative, keep</span>
<a name="line-18894"></a>                                              <span class='hs-comment'>-- entire upper triangle.</span>
<a name="line-18895"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __band__: Rank `k` tensor of the same shape as input. The extracted banded tensor.</span>
<a name="line-18896"></a><span class='hs-definition'>matrixBandPart</span> <span class='hs-varid'>input</span> <span class='hs-varid'>num_lower</span> <span class='hs-varid'>num_upper</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18897"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatrixBandPart"</span>
<a name="line-18898"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-18899"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>num_lower</span> <span class='hs-varid'>num_upper</span>
<a name="line-18900"></a><span class='hs-comment'>{-
<a name="line-18901"></a>attr { name: "T" type: "type" }
<a name="line-18902"></a>input_arg {
<a name="line-18903"></a>  description: "Rank `k` tensor." name: "input" type_attr: "T"
<a name="line-18904"></a>}
<a name="line-18905"></a>input_arg {
<a name="line-18906"></a>  description: "0-D tensor. Number of subdiagonals to keep. If negative, keep entire\nlower triangle."
<a name="line-18907"></a>  name: "num_lower"
<a name="line-18908"></a>  type: DT_INT64
<a name="line-18909"></a>}
<a name="line-18910"></a>input_arg {
<a name="line-18911"></a>  description: "0-D tensor. Number of superdiagonals to keep. If negative, keep\nentire upper triangle."
<a name="line-18912"></a>  name: "num_upper"
<a name="line-18913"></a>  type: DT_INT64
<a name="line-18914"></a>}
<a name="line-18915"></a>output_arg {
<a name="line-18916"></a>  description: "Rank `k` tensor of the same shape as input. The extracted banded tensor."
<a name="line-18917"></a>  name: "band"
<a name="line-18918"></a>  type_attr: "T"
<a name="line-18919"></a>}
<a name="line-18920"></a>-}</span>
<a name="line-18921"></a>
<a name="line-18922"></a><a name="queueClose"></a><span class='hs-comment'>-- | Closes the given queue.</span>
<a name="line-18923"></a><span class='hs-comment'>--</span>
<a name="line-18924"></a><span class='hs-comment'>-- This operation signals that no more elements will be enqueued in the</span>
<a name="line-18925"></a><span class='hs-comment'>-- given queue. Subsequent Enqueue(Many) operations will fail.</span>
<a name="line-18926"></a><span class='hs-comment'>-- Subsequent Dequeue(Many) operations will continue to succeed if</span>
<a name="line-18927"></a><span class='hs-comment'>-- sufficient elements remain in the queue. Subsequent Dequeue(Many)</span>
<a name="line-18928"></a><span class='hs-comment'>-- operations that would block will fail immediately.</span>
<a name="line-18929"></a><span class='hs-definition'>queueClose</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a queue.</span>
<a name="line-18930"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-18931"></a><span class='hs-definition'>queueClose</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18932"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QueueClose"</span><span class='hs-layout'>)</span>
<a name="line-18933"></a>        <span class='hs-varid'>handle</span>
<a name="line-18934"></a><span class='hs-comment'>{-
<a name="line-18935"></a>attr {
<a name="line-18936"></a>  default_value { b: false }
<a name="line-18937"></a>  description: "If true, all pending enqueue requests that are\nblocked on the given queue will be cancelled."
<a name="line-18938"></a>  name: "cancel_pending_enqueues"
<a name="line-18939"></a>  type: "bool"
<a name="line-18940"></a>}
<a name="line-18941"></a>input_arg {
<a name="line-18942"></a>  description: "The handle to a queue."
<a name="line-18943"></a>  is_ref: true
<a name="line-18944"></a>  name: "handle"
<a name="line-18945"></a>  type: DT_STRING
<a name="line-18946"></a>}
<a name="line-18947"></a>-}</span>
<a name="line-18948"></a>
<a name="line-18949"></a><a name="mergeV2Checkpoints"></a><span class='hs-comment'>-- | V2 format specific: merges the metadata files of sharded checkpoints.  The</span>
<a name="line-18950"></a><span class='hs-comment'>--</span>
<a name="line-18951"></a><span class='hs-comment'>-- result is one logical checkpoint, with one physical metadata file and renamed</span>
<a name="line-18952"></a><span class='hs-comment'>-- data files.</span>
<a name="line-18953"></a><span class='hs-comment'>-- </span>
<a name="line-18954"></a><span class='hs-comment'>-- Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.</span>
<a name="line-18955"></a><span class='hs-comment'>-- </span>
<a name="line-18956"></a><span class='hs-comment'>-- If delete_old_dirs is true, attempts to delete recursively the dirname of each</span>
<a name="line-18957"></a><span class='hs-comment'>-- path in the input checkpoint_prefixes.  This is useful when those paths are non</span>
<a name="line-18958"></a><span class='hs-comment'>-- user-facing temporary locations.</span>
<a name="line-18959"></a><span class='hs-definition'>mergeV2Checkpoints</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __checkpoint_prefixes__: prefixes of V2 checkpoints to merge.</span>
<a name="line-18960"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __destination_prefix__: scalar.  The desired final prefix.  Allowed to be the same</span>
<a name="line-18961"></a>                                                              <span class='hs-comment'>-- as one of the checkpoint_prefixes.</span>
<a name="line-18962"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>ControlNode</span>
<a name="line-18963"></a><span class='hs-definition'>mergeV2Checkpoints</span> <span class='hs-varid'>checkpoint_prefixes</span> <span class='hs-varid'>destination_prefix</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18964"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MergeV2Checkpoints"</span><span class='hs-layout'>)</span>
<a name="line-18965"></a>        <span class='hs-varid'>checkpoint_prefixes</span> <span class='hs-varid'>destination_prefix</span>
<a name="line-18966"></a><span class='hs-comment'>{-
<a name="line-18967"></a>attr {
<a name="line-18968"></a>  default_value { b: true }
<a name="line-18969"></a>  description: "see above."
<a name="line-18970"></a>  name: "delete_old_dirs"
<a name="line-18971"></a>  type: "bool"
<a name="line-18972"></a>}
<a name="line-18973"></a>input_arg {
<a name="line-18974"></a>  description: "prefixes of V2 checkpoints to merge."
<a name="line-18975"></a>  name: "checkpoint_prefixes"
<a name="line-18976"></a>  type: DT_STRING
<a name="line-18977"></a>}
<a name="line-18978"></a>input_arg {
<a name="line-18979"></a>  description: "scalar.  The desired final prefix.  Allowed to be the same\nas one of the checkpoint_prefixes."
<a name="line-18980"></a>  name: "destination_prefix"
<a name="line-18981"></a>  type: DT_STRING
<a name="line-18982"></a>}
<a name="line-18983"></a>-}</span>
<a name="line-18984"></a>
<a name="line-18985"></a><span class='hs-comment'>-- | Computes the number of complete elements in the given barrier.</span>
<a name="line-18986"></a>
<a name="line-18987"></a><a name="barrierReadySize"></a><span class='hs-definition'>barrierReadySize</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a barrier.</span>
<a name="line-18988"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __size__: The number of complete elements (i.e. those with all of their value</span>
<a name="line-18989"></a>                    <span class='hs-comment'>-- components set) in the barrier.</span>
<a name="line-18990"></a><span class='hs-definition'>barrierReadySize</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-18991"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BarrierReadySize"</span><span class='hs-layout'>)</span>
<a name="line-18992"></a>        <span class='hs-varid'>handle</span>
<a name="line-18993"></a><span class='hs-comment'>{-
<a name="line-18994"></a>input_arg {
<a name="line-18995"></a>  description: "The handle to a barrier."
<a name="line-18996"></a>  is_ref: true
<a name="line-18997"></a>  name: "handle"
<a name="line-18998"></a>  type: DT_STRING
<a name="line-18999"></a>}
<a name="line-19000"></a>output_arg {
<a name="line-19001"></a>  description: "The number of complete elements (i.e. those with all of their value\ncomponents set) in the barrier."
<a name="line-19002"></a>  name: "size"
<a name="line-19003"></a>  type: DT_INT32
<a name="line-19004"></a>}
<a name="line-19005"></a>-}</span>
<a name="line-19006"></a>
<a name="line-19007"></a><span class='hs-comment'>-- | A queue that randomizes the order of elements.</span>
<a name="line-19008"></a>
<a name="line-19009"></a><a name="randomShuffleQueue"></a><span class='hs-definition'>randomShuffleQueue</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __handle__: The handle to the queue.</span>
<a name="line-19010"></a><span class='hs-definition'>randomShuffleQueue</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19011"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RandomShuffleQueue"</span><span class='hs-layout'>)</span>
<a name="line-19012"></a>        
<a name="line-19013"></a><span class='hs-comment'>{-
<a name="line-19014"></a>attr {
<a name="line-19015"></a>  description: "The type of each component in a value."
<a name="line-19016"></a>  has_minimum: true
<a name="line-19017"></a>  minimum: 1
<a name="line-19018"></a>  name: "component_types"
<a name="line-19019"></a>  type: "list(type)"
<a name="line-19020"></a>}
<a name="line-19021"></a>attr {
<a name="line-19022"></a>  default_value { list { } }
<a name="line-19023"></a>  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
<a name="line-19024"></a>  has_minimum: true
<a name="line-19025"></a>  name: "shapes"
<a name="line-19026"></a>  type: "list(shape)"
<a name="line-19027"></a>}
<a name="line-19028"></a>attr {
<a name="line-19029"></a>  default_value { i: -1 }
<a name="line-19030"></a>  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
<a name="line-19031"></a>  name: "capacity"
<a name="line-19032"></a>  type: "int"
<a name="line-19033"></a>}
<a name="line-19034"></a>attr {
<a name="line-19035"></a>  default_value { i: 0 }
<a name="line-19036"></a>  description: "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. This\nensures a minimum level of mixing of elements."
<a name="line-19037"></a>  name: "min_after_dequeue"
<a name="line-19038"></a>  type: "int"
<a name="line-19039"></a>}
<a name="line-19040"></a>attr {
<a name="line-19041"></a>  default_value { i: 0 }
<a name="line-19042"></a>  description: "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, a random seed is used."
<a name="line-19043"></a>  name: "seed"
<a name="line-19044"></a>  type: "int"
<a name="line-19045"></a>}
<a name="line-19046"></a>attr {
<a name="line-19047"></a>  default_value { i: 0 }
<a name="line-19048"></a>  description: "A second seed to avoid seed collision."
<a name="line-19049"></a>  name: "seed2"
<a name="line-19050"></a>  type: "int"
<a name="line-19051"></a>}
<a name="line-19052"></a>attr {
<a name="line-19053"></a>  default_value { s: "" }
<a name="line-19054"></a>  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
<a name="line-19055"></a>  name: "container"
<a name="line-19056"></a>  type: "string"
<a name="line-19057"></a>}
<a name="line-19058"></a>attr {
<a name="line-19059"></a>  default_value { s: "" }
<a name="line-19060"></a>  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
<a name="line-19061"></a>  name: "shared_name"
<a name="line-19062"></a>  type: "string"
<a name="line-19063"></a>}
<a name="line-19064"></a>output_arg {
<a name="line-19065"></a>  description: "The handle to the queue."
<a name="line-19066"></a>  is_ref: true
<a name="line-19067"></a>  name: "handle"
<a name="line-19068"></a>  type: DT_STRING
<a name="line-19069"></a>}
<a name="line-19070"></a>-}</span>
<a name="line-19071"></a>
<a name="line-19072"></a><a name="notEqual"></a><span class='hs-comment'>-- | Returns the truth value of (x != y) element-wise.</span>
<a name="line-19073"></a><span class='hs-comment'>--</span>
<a name="line-19074"></a><span class='hs-comment'>-- *NOTE*: `NotEqual` supports broadcasting. More about broadcasting</span>
<a name="line-19075"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-19076"></a><span class='hs-definition'>notEqual</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-19077"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-19078"></a>                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span> <span class='hs-conid'>Bool</span><span class='hs-layout'>,</span>
<a name="line-19079"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>,</span>
<a name="line-19080"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-19081"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-19082"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-19083"></a>                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-19084"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-19085"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-19086"></a><span class='hs-definition'>notEqual</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19087"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"NotEqual"</span>
<a name="line-19088"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19089"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-19090"></a><span class='hs-comment'>{-
<a name="line-19091"></a>attr {
<a name="line-19092"></a>  allowed_values {
<a name="line-19093"></a>    list {
<a name="line-19094"></a>      type: DT_HALF
<a name="line-19095"></a>      type: DT_FLOAT
<a name="line-19096"></a>      type: DT_DOUBLE
<a name="line-19097"></a>      type: DT_UINT8
<a name="line-19098"></a>      type: DT_INT8
<a name="line-19099"></a>      type: DT_INT16
<a name="line-19100"></a>      type: DT_INT32
<a name="line-19101"></a>      type: DT_INT64
<a name="line-19102"></a>      type: DT_COMPLEX64
<a name="line-19103"></a>      type: DT_QUINT8
<a name="line-19104"></a>      type: DT_QINT8
<a name="line-19105"></a>      type: DT_QINT32
<a name="line-19106"></a>      type: DT_STRING
<a name="line-19107"></a>      type: DT_BOOL
<a name="line-19108"></a>      type: DT_COMPLEX128
<a name="line-19109"></a>    }
<a name="line-19110"></a>  }
<a name="line-19111"></a>  name: "T"
<a name="line-19112"></a>  type: "type"
<a name="line-19113"></a>}
<a name="line-19114"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-19115"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-19116"></a>output_arg { name: "z" type: DT_BOOL }
<a name="line-19117"></a>-}</span>
<a name="line-19118"></a>
<a name="line-19119"></a><a name="nonMaxSuppression"></a><span class='hs-comment'>-- | Greedily selects a subset of bounding boxes in descending order of score,</span>
<a name="line-19120"></a><span class='hs-comment'>--</span>
<a name="line-19121"></a><span class='hs-comment'>-- pruning away boxes that have high intersection-over-union (IOU) overlap</span>
<a name="line-19122"></a><span class='hs-comment'>-- with previously selected boxes.  Bounding boxes are supplied as</span>
<a name="line-19123"></a><span class='hs-comment'>-- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any</span>
<a name="line-19124"></a><span class='hs-comment'>-- diagonal pair of box corners and the coordinates can be provided as normalized</span>
<a name="line-19125"></a><span class='hs-comment'>-- (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm</span>
<a name="line-19126"></a><span class='hs-comment'>-- is agnostic to where the origin is in the coordinate system.  Note that this</span>
<a name="line-19127"></a><span class='hs-comment'>-- algorithm is invariant to orthogonal transformations and translations</span>
<a name="line-19128"></a><span class='hs-comment'>-- of the coordinate system; thus translating or reflections of the coordinate</span>
<a name="line-19129"></a><span class='hs-comment'>-- system result in the same boxes being selected by the algorithm.</span>
<a name="line-19130"></a><span class='hs-comment'>-- </span>
<a name="line-19131"></a><span class='hs-comment'>-- The output of this operation is a set of integers indexing into the input</span>
<a name="line-19132"></a><span class='hs-comment'>-- collection of bounding boxes representing the selected boxes.  The bounding</span>
<a name="line-19133"></a><span class='hs-comment'>-- box coordinates corresponding to the selected indices can then be obtained</span>
<a name="line-19134"></a><span class='hs-comment'>-- using the `tf.gather operation`.  For example:</span>
<a name="line-19135"></a><span class='hs-comment'>-- </span>
<a name="line-19136"></a><span class='hs-comment'>--   selected_indices = tf.image.non_max_suppression(</span>
<a name="line-19137"></a><span class='hs-comment'>--       boxes, scores, max_output_size, iou_threshold)</span>
<a name="line-19138"></a><span class='hs-comment'>--   selected_boxes = tf.gather(boxes, selected_indices)</span>
<a name="line-19139"></a><span class='hs-definition'>nonMaxSuppression</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __boxes__: A 2-D float tensor of shape `[num_boxes, 4]`.</span>
<a name="line-19140"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __scores__: A 1-D float tensor of shape `[num_boxes]` representing a single</span>
<a name="line-19141"></a>                                        <span class='hs-comment'>-- score corresponding to each box (each row of boxes).</span>
<a name="line-19142"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __max_output_size__: A scalar integer tensor representing the maximum number of</span>
<a name="line-19143"></a>                                                 <span class='hs-comment'>-- boxes to be selected by non max suppression.</span>
<a name="line-19144"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __selected_indices__: A 1-D integer tensor of shape `[M]` representing the selected</span>
<a name="line-19145"></a>                     <span class='hs-comment'>-- indices from the boxes tensor, where `M &lt;= max_output_size`.</span>
<a name="line-19146"></a><span class='hs-definition'>nonMaxSuppression</span> <span class='hs-varid'>boxes</span> <span class='hs-varid'>scores</span> <span class='hs-varid'>max_output_size</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19147"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"NonMaxSuppression"</span><span class='hs-layout'>)</span>
<a name="line-19148"></a>        <span class='hs-varid'>boxes</span> <span class='hs-varid'>scores</span> <span class='hs-varid'>max_output_size</span>
<a name="line-19149"></a><span class='hs-comment'>{-
<a name="line-19150"></a>attr {
<a name="line-19151"></a>  default_value { f: 0.5 }
<a name="line-19152"></a>  description: "A float representing the threshold for deciding whether boxes\noverlap too much with respect to IOU."
<a name="line-19153"></a>  name: "iou_threshold"
<a name="line-19154"></a>  type: "float"
<a name="line-19155"></a>}
<a name="line-19156"></a>input_arg {
<a name="line-19157"></a>  description: "A 2-D float tensor of shape `[num_boxes, 4]`."
<a name="line-19158"></a>  name: "boxes"
<a name="line-19159"></a>  type: DT_FLOAT
<a name="line-19160"></a>}
<a name="line-19161"></a>input_arg {
<a name="line-19162"></a>  description: "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes)."
<a name="line-19163"></a>  name: "scores"
<a name="line-19164"></a>  type: DT_FLOAT
<a name="line-19165"></a>}
<a name="line-19166"></a>input_arg {
<a name="line-19167"></a>  description: "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression."
<a name="line-19168"></a>  name: "max_output_size"
<a name="line-19169"></a>  type: DT_INT32
<a name="line-19170"></a>}
<a name="line-19171"></a>output_arg {
<a name="line-19172"></a>  description: "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M &lt;= max_output_size`."
<a name="line-19173"></a>  name: "selected_indices"
<a name="line-19174"></a>  type: DT_INT32
<a name="line-19175"></a>}
<a name="line-19176"></a>-}</span>
<a name="line-19177"></a>
<a name="line-19178"></a><span class='hs-comment'>-- | </span>
<a name="line-19179"></a>
<a name="line-19180"></a><a name="tensorArrayWrite"></a><span class='hs-definition'>tensorArrayWrite</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19181"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-19182"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __index__</span>
<a name="line-19183"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__</span>
<a name="line-19184"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__</span>
<a name="line-19185"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __flow_out__</span>
<a name="line-19186"></a><span class='hs-definition'>tensorArrayWrite</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>index</span> <span class='hs-varid'>value</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19187"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayWrite"</span>
<a name="line-19188"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19189"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>index</span> <span class='hs-varid'>value</span> <span class='hs-varid'>flow_in</span>
<a name="line-19190"></a><span class='hs-comment'>{-
<a name="line-19191"></a>attr { name: "T" type: "type" }
<a name="line-19192"></a>input_arg { is_ref: true name: "handle" type: DT_STRING }
<a name="line-19193"></a>input_arg { name: "index" type: DT_INT32 }
<a name="line-19194"></a>input_arg { name: "value" type_attr: "T" }
<a name="line-19195"></a>input_arg { name: "flow_in" type: DT_FLOAT }
<a name="line-19196"></a>output_arg { name: "flow_out" type: DT_FLOAT }
<a name="line-19197"></a>-}</span>
<a name="line-19198"></a>
<a name="line-19199"></a><a name="quantizeAndDequantize"></a><span class='hs-comment'>-- | Quantizes then dequantizes a tensor.</span>
<a name="line-19200"></a><span class='hs-comment'>--</span>
<a name="line-19201"></a><span class='hs-comment'>-- This op simulates the precision loss from the quantized forward pass by:</span>
<a name="line-19202"></a><span class='hs-comment'>-- 1. Quantizing the tensor to fixed point numbers, which should match the target</span>
<a name="line-19203"></a><span class='hs-comment'>--    quantization method when it is used in inference.</span>
<a name="line-19204"></a><span class='hs-comment'>-- 2. Dequantizing it back to floating point numbers for the following ops, most</span>
<a name="line-19205"></a><span class='hs-comment'>--    likely matmul.</span>
<a name="line-19206"></a><span class='hs-comment'>-- </span>
<a name="line-19207"></a><span class='hs-comment'>-- There are different ways to quantize. This version does not use the full range</span>
<a name="line-19208"></a><span class='hs-comment'>-- of the output type, choosing to elide the lowest possible value for symmetry</span>
<a name="line-19209"></a><span class='hs-comment'>-- (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit</span>
<a name="line-19210"></a><span class='hs-comment'>-- quantization), so that 0.0 maps to 0.</span>
<a name="line-19211"></a><span class='hs-comment'>-- </span>
<a name="line-19212"></a><span class='hs-comment'>-- To perform this op, we first find the range of values in our tensor. The range</span>
<a name="line-19213"></a><span class='hs-comment'>-- we use is always centered on 0, so we find m such that</span>
<a name="line-19214"></a><span class='hs-comment'>-- </span>
<a name="line-19215"></a><span class='hs-comment'>-- 1. m = max(abs(input_min), abs(input_max)) if range_given is true,</span>
<a name="line-19216"></a><span class='hs-comment'>-- 2. m = max(max(abs(min_elem(input)), abs(max_elem(input))) otherwise.</span>
<a name="line-19217"></a><span class='hs-comment'>-- </span>
<a name="line-19218"></a><span class='hs-comment'>-- Our input tensor range is then [-m, m].</span>
<a name="line-19219"></a><span class='hs-comment'>-- </span>
<a name="line-19220"></a><span class='hs-comment'>-- Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].</span>
<a name="line-19221"></a><span class='hs-comment'>-- If signed_input is true, this is</span>
<a name="line-19222"></a><span class='hs-comment'>-- </span>
<a name="line-19223"></a><span class='hs-comment'>--   [min_fixed, max_fixed ] =</span>
<a name="line-19224"></a><span class='hs-comment'>--       [-(1 &lt;&lt; (num_bits - 1) - 1), (1 &lt;&lt; (num_bits - 1)) - 1].</span>
<a name="line-19225"></a><span class='hs-comment'>-- </span>
<a name="line-19226"></a><span class='hs-comment'>-- Otherwise, if signed_input is false, the fixed-point range is</span>
<a name="line-19227"></a><span class='hs-comment'>-- </span>
<a name="line-19228"></a><span class='hs-comment'>--   [min_fixed, max_fixed] = [0, (1 &lt;&lt; num_bits) - 1].</span>
<a name="line-19229"></a><span class='hs-comment'>-- </span>
<a name="line-19230"></a><span class='hs-comment'>-- From this we compute our scaling factor, s:</span>
<a name="line-19231"></a><span class='hs-comment'>-- </span>
<a name="line-19232"></a><span class='hs-comment'>--   s = (max_fixed - min_fixed) / (2 * m).</span>
<a name="line-19233"></a><span class='hs-comment'>-- </span>
<a name="line-19234"></a><span class='hs-comment'>-- Now we can quantize and dequantize the elements of our tensor.  An element e</span>
<a name="line-19235"></a><span class='hs-comment'>-- is transformed into e':</span>
<a name="line-19236"></a><span class='hs-comment'>-- </span>
<a name="line-19237"></a><span class='hs-comment'>--   e' = (e * s).round_to_nearest() / s.</span>
<a name="line-19238"></a><span class='hs-comment'>-- </span>
<a name="line-19239"></a><span class='hs-comment'>-- Note that we have a different number of buckets in the signed vs. unsigned</span>
<a name="line-19240"></a><span class='hs-comment'>-- cases.  For example, if num_bits == 8, we get 254 buckets in the signed case</span>
<a name="line-19241"></a><span class='hs-comment'>-- vs. 255 in the unsigned case.</span>
<a name="line-19242"></a><span class='hs-comment'>-- </span>
<a name="line-19243"></a><span class='hs-comment'>-- For example, suppose num_bits = 8 and m = 1.  Then</span>
<a name="line-19244"></a><span class='hs-comment'>-- </span>
<a name="line-19245"></a><span class='hs-comment'>--   [min_fixed, max_fixed] = [-127, 127], and</span>
<a name="line-19246"></a><span class='hs-comment'>--   s = (127 + 127) / 2 = 127.</span>
<a name="line-19247"></a><span class='hs-comment'>-- </span>
<a name="line-19248"></a><span class='hs-comment'>-- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to</span>
<a name="line-19249"></a><span class='hs-comment'>-- {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.</span>
<a name="line-19250"></a><span class='hs-definition'>quantizeAndDequantize</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-19251"></a>                                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19252"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Tensor to quantize and then dequantize.</span>
<a name="line-19253"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-19254"></a><span class='hs-definition'>quantizeAndDequantize</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19255"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizeAndDequantize"</span>
<a name="line-19256"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19257"></a>        <span class='hs-varid'>input</span>
<a name="line-19258"></a><span class='hs-comment'>{-
<a name="line-19259"></a>attr {
<a name="line-19260"></a>  default_value { b: true }
<a name="line-19261"></a>  description: "If the quantization is signed or unsigned."
<a name="line-19262"></a>  name: "signed_input"
<a name="line-19263"></a>  type: "bool"
<a name="line-19264"></a>}
<a name="line-19265"></a>attr {
<a name="line-19266"></a>  default_value { i: 8 }
<a name="line-19267"></a>  description: "The bitwidth of the quantization."
<a name="line-19268"></a>  name: "num_bits"
<a name="line-19269"></a>  type: "int"
<a name="line-19270"></a>}
<a name="line-19271"></a>attr {
<a name="line-19272"></a>  default_value { b: false }
<a name="line-19273"></a>  description: "If the range is given or should be computed from the tensor."
<a name="line-19274"></a>  name: "range_given"
<a name="line-19275"></a>  type: "bool"
<a name="line-19276"></a>}
<a name="line-19277"></a>attr {
<a name="line-19278"></a>  default_value { f: 0.0 }
<a name="line-19279"></a>  description: "If range is given, this is the min of the range."
<a name="line-19280"></a>  name: "input_min"
<a name="line-19281"></a>  type: "float"
<a name="line-19282"></a>}
<a name="line-19283"></a>attr {
<a name="line-19284"></a>  default_value { f: 0.0 }
<a name="line-19285"></a>  description: "If range is given, this is the max of the range."
<a name="line-19286"></a>  name: "input_max"
<a name="line-19287"></a>  type: "float"
<a name="line-19288"></a>}
<a name="line-19289"></a>attr {
<a name="line-19290"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-19291"></a>  name: "T"
<a name="line-19292"></a>  type: "type"
<a name="line-19293"></a>}
<a name="line-19294"></a>input_arg {
<a name="line-19295"></a>  description: "Tensor to quantize and then dequantize."
<a name="line-19296"></a>  name: "input"
<a name="line-19297"></a>  type_attr: "T"
<a name="line-19298"></a>}
<a name="line-19299"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-19300"></a>-}</span>
<a name="line-19301"></a>
<a name="line-19302"></a><a name="readerRead"></a><span class='hs-comment'>-- | Returns the next record (key, value pair) produced by a Reader.</span>
<a name="line-19303"></a><span class='hs-comment'>--</span>
<a name="line-19304"></a><span class='hs-comment'>-- Will dequeue from the input queue if necessary (e.g. when the</span>
<a name="line-19305"></a><span class='hs-comment'>-- Reader needs to start reading from a new file since it has finished</span>
<a name="line-19306"></a><span class='hs-comment'>-- with the previous file).</span>
<a name="line-19307"></a><span class='hs-definition'>readerRead</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __reader_handle__: Handle to a Reader.</span>
<a name="line-19308"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __queue_handle__: Handle to a Queue, with string work items.</span>
<a name="line-19309"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>,</span>
<a name="line-19310"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19311"></a>              <span class='hs-comment'>-- ^ (__key__, __value__)</span>
<a name="line-19312"></a>              <span class='hs-comment'>--</span>
<a name="line-19313"></a>              <span class='hs-comment'>-- * __key__: A scalar.</span>
<a name="line-19314"></a>              <span class='hs-comment'>--</span>
<a name="line-19315"></a>              <span class='hs-comment'>-- * __value__: A scalar.</span>
<a name="line-19316"></a><span class='hs-definition'>readerRead</span> <span class='hs-varid'>reader_handle</span> <span class='hs-varid'>queue_handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19317"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReaderRead"</span><span class='hs-layout'>)</span>
<a name="line-19318"></a>        <span class='hs-varid'>reader_handle</span> <span class='hs-varid'>queue_handle</span>
<a name="line-19319"></a><span class='hs-comment'>{-
<a name="line-19320"></a>input_arg {
<a name="line-19321"></a>  description: "Handle to a Reader."
<a name="line-19322"></a>  is_ref: true
<a name="line-19323"></a>  name: "reader_handle"
<a name="line-19324"></a>  type: DT_STRING
<a name="line-19325"></a>}
<a name="line-19326"></a>input_arg {
<a name="line-19327"></a>  description: "Handle to a Queue, with string work items."
<a name="line-19328"></a>  is_ref: true
<a name="line-19329"></a>  name: "queue_handle"
<a name="line-19330"></a>  type: DT_STRING
<a name="line-19331"></a>}
<a name="line-19332"></a>output_arg { description: "A scalar." name: "key" type: DT_STRING }
<a name="line-19333"></a>output_arg {
<a name="line-19334"></a>  description: "A scalar." name: "value" type: DT_STRING
<a name="line-19335"></a>}
<a name="line-19336"></a>-}</span>
<a name="line-19337"></a>
<a name="line-19338"></a><a name="matrixTriangularSolve"></a><span class='hs-comment'>-- | Solves systems of linear equations with upper or lower triangular matrices by</span>
<a name="line-19339"></a><span class='hs-comment'>--</span>
<a name="line-19340"></a><span class='hs-comment'>-- backsubstitution.</span>
<a name="line-19341"></a><span class='hs-comment'>-- </span>
<a name="line-19342"></a><span class='hs-comment'>-- `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form</span>
<a name="line-19343"></a><span class='hs-comment'>-- square matrices. If `lower` is `True` then the strictly upper triangular part</span>
<a name="line-19344"></a><span class='hs-comment'>-- of each inner-most matrix is assumed to be zero and not accessed.</span>
<a name="line-19345"></a><span class='hs-comment'>-- If `lower` is False then the strictly lower triangular part of each inner-most</span>
<a name="line-19346"></a><span class='hs-comment'>-- matrix is assumed to be zero and not accessed.</span>
<a name="line-19347"></a><span class='hs-comment'>-- `rhs` is a tensor of shape `[..., M, K]`.</span>
<a name="line-19348"></a><span class='hs-comment'>-- </span>
<a name="line-19349"></a><span class='hs-comment'>-- The output is a tensor of shape `[..., M, K]`. If `adjoint` is</span>
<a name="line-19350"></a><span class='hs-comment'>-- `True` then the innermost matrices in output` satisfy matrix equations</span>
<a name="line-19351"></a><span class='hs-comment'>-- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.</span>
<a name="line-19352"></a><span class='hs-comment'>-- If `adjoint` is `False` then the strictly then the  innermost matrices in</span>
<a name="line-19353"></a><span class='hs-comment'>-- `output` satisfy matrix equations</span>
<a name="line-19354"></a><span class='hs-comment'>-- `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.</span>
<a name="line-19355"></a><span class='hs-definition'>matrixTriangularSolve</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-19356"></a>                                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19357"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __matrix__: Shape is `[..., M, M]`.</span>
<a name="line-19358"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rhs__: Shape is `[..., M, K]`.</span>
<a name="line-19359"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Shape is `[..., M, K]`.</span>
<a name="line-19360"></a><span class='hs-definition'>matrixTriangularSolve</span> <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19361"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatrixTriangularSolve"</span>
<a name="line-19362"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19363"></a>        <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span>
<a name="line-19364"></a><span class='hs-comment'>{-
<a name="line-19365"></a>attr {
<a name="line-19366"></a>  default_value { b: true }
<a name="line-19367"></a>  description: "Boolean indicating whether the innermost matrices in `matrix` are\nlower or upper triangular."
<a name="line-19368"></a>  name: "lower"
<a name="line-19369"></a>  type: "bool"
<a name="line-19370"></a>}
<a name="line-19371"></a>attr {
<a name="line-19372"></a>  default_value { b: false }
<a name="line-19373"></a>  description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\n         adjoint.\n\n@compatibility(numpy)\nEquivalent to np.linalg.triangular_solve\n@end_compatibility"
<a name="line-19374"></a>  name: "adjoint"
<a name="line-19375"></a>  type: "bool"
<a name="line-19376"></a>}
<a name="line-19377"></a>attr {
<a name="line-19378"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-19379"></a>  name: "T"
<a name="line-19380"></a>  type: "type"
<a name="line-19381"></a>}
<a name="line-19382"></a>input_arg {
<a name="line-19383"></a>  description: "Shape is `[..., M, M]`."
<a name="line-19384"></a>  name: "matrix"
<a name="line-19385"></a>  type_attr: "T"
<a name="line-19386"></a>}
<a name="line-19387"></a>input_arg {
<a name="line-19388"></a>  description: "Shape is `[..., M, K]`." name: "rhs" type_attr: "T"
<a name="line-19389"></a>}
<a name="line-19390"></a>output_arg {
<a name="line-19391"></a>  description: "Shape is `[..., M, K]`."
<a name="line-19392"></a>  name: "output"
<a name="line-19393"></a>  type_attr: "T"
<a name="line-19394"></a>}
<a name="line-19395"></a>-}</span>
<a name="line-19396"></a>
<a name="line-19397"></a><a name="tensorArraySplitV2"></a><span class='hs-comment'>-- | Split the data from the input value into TensorArray elements.</span>
<a name="line-19398"></a><span class='hs-comment'>--</span>
<a name="line-19399"></a><span class='hs-comment'>-- Assuming that `lengths` takes on values</span>
<a name="line-19400"></a><span class='hs-comment'>-- </span>
<a name="line-19401"></a><span class='hs-comment'>--   ```(n0, n1, ..., n(T-1))```</span>
<a name="line-19402"></a><span class='hs-comment'>-- </span>
<a name="line-19403"></a><span class='hs-comment'>-- and that `value` has shape</span>
<a name="line-19404"></a><span class='hs-comment'>-- </span>
<a name="line-19405"></a><span class='hs-comment'>--   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,</span>
<a name="line-19406"></a><span class='hs-comment'>-- </span>
<a name="line-19407"></a><span class='hs-comment'>-- this splits values into a TensorArray with T tensors.</span>
<a name="line-19408"></a><span class='hs-comment'>-- </span>
<a name="line-19409"></a><span class='hs-comment'>-- TensorArray index t will be the subtensor of values with starting position</span>
<a name="line-19410"></a><span class='hs-comment'>-- </span>
<a name="line-19411"></a><span class='hs-comment'>--   ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```</span>
<a name="line-19412"></a><span class='hs-comment'>-- </span>
<a name="line-19413"></a><span class='hs-comment'>-- and having size</span>
<a name="line-19414"></a><span class='hs-comment'>-- </span>
<a name="line-19415"></a><span class='hs-comment'>--   ```nt x d0 x d1 x ...```</span>
<a name="line-19416"></a><span class='hs-definition'>tensorArraySplitV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19417"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a TensorArray.</span>
<a name="line-19418"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: The concatenated tensor to write to the TensorArray.</span>
<a name="line-19419"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __lengths__: The vector of lengths, how to split the rows of value into the</span>
<a name="line-19420"></a>                                                  <span class='hs-comment'>-- TensorArray.</span>
<a name="line-19421"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-19422"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_out__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-19423"></a><span class='hs-definition'>tensorArraySplitV2</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>value</span> <span class='hs-varid'>lengths</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19424"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArraySplitV2"</span>
<a name="line-19425"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19426"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>value</span> <span class='hs-varid'>lengths</span> <span class='hs-varid'>flow_in</span>
<a name="line-19427"></a><span class='hs-comment'>{-
<a name="line-19428"></a>attr { name: "T" type: "type" }
<a name="line-19429"></a>input_arg {
<a name="line-19430"></a>  description: "The handle to a TensorArray."
<a name="line-19431"></a>  name: "handle"
<a name="line-19432"></a>  type: DT_STRING
<a name="line-19433"></a>}
<a name="line-19434"></a>input_arg {
<a name="line-19435"></a>  description: "The concatenated tensor to write to the TensorArray."
<a name="line-19436"></a>  name: "value"
<a name="line-19437"></a>  type_attr: "T"
<a name="line-19438"></a>}
<a name="line-19439"></a>input_arg {
<a name="line-19440"></a>  description: "The vector of lengths, how to split the rows of value into the\nTensorArray."
<a name="line-19441"></a>  name: "lengths"
<a name="line-19442"></a>  type: DT_INT64
<a name="line-19443"></a>}
<a name="line-19444"></a>input_arg {
<a name="line-19445"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-19446"></a>  name: "flow_in"
<a name="line-19447"></a>  type: DT_FLOAT
<a name="line-19448"></a>}
<a name="line-19449"></a>output_arg {
<a name="line-19450"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-19451"></a>  name: "flow_out"
<a name="line-19452"></a>  type: DT_FLOAT
<a name="line-19453"></a>}
<a name="line-19454"></a>-}</span>
<a name="line-19455"></a>
<a name="line-19456"></a><a name="restore"></a><span class='hs-comment'>-- | Restores a tensor from checkpoint files.</span>
<a name="line-19457"></a><span class='hs-comment'>--</span>
<a name="line-19458"></a><span class='hs-comment'>-- Reads a tensor stored in one or several files. If there are several files (for</span>
<a name="line-19459"></a><span class='hs-comment'>-- instance because a tensor was saved as slices), `file_pattern` may contain</span>
<a name="line-19460"></a><span class='hs-comment'>-- wildcard symbols (`*` and `?`) in the filename portion only, not in the</span>
<a name="line-19461"></a><span class='hs-comment'>-- directory portion.</span>
<a name="line-19462"></a><span class='hs-comment'>-- </span>
<a name="line-19463"></a><span class='hs-comment'>-- If a `file_pattern` matches several files, `preferred_shard` can be used to hint</span>
<a name="line-19464"></a><span class='hs-comment'>-- in which file the requested tensor is likely to be found. This op will first</span>
<a name="line-19465"></a><span class='hs-comment'>-- open the file at index `preferred_shard` in the list of matching files and try</span>
<a name="line-19466"></a><span class='hs-comment'>-- to restore tensors from that file.  Only if some tensors or tensor slices are</span>
<a name="line-19467"></a><span class='hs-comment'>-- not found in that first file, then the Op opens all the files. Setting</span>
<a name="line-19468"></a><span class='hs-comment'>-- `preferred_shard` to match the value passed as the `shard` input</span>
<a name="line-19469"></a><span class='hs-comment'>-- of a matching `Save` Op may speed up Restore.  This attribute only affects</span>
<a name="line-19470"></a><span class='hs-comment'>-- performance, not correctness.  The default value -1 means files are processed in</span>
<a name="line-19471"></a><span class='hs-comment'>-- order.</span>
<a name="line-19472"></a><span class='hs-comment'>-- </span>
<a name="line-19473"></a><span class='hs-comment'>-- See also `RestoreSlice`.</span>
<a name="line-19474"></a><span class='hs-definition'>restore</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dt</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dt</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19475"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __file_pattern__: Must have a single element. The pattern of the files from</span>
<a name="line-19476"></a>                                                <span class='hs-comment'>-- which we read the tensor.</span>
<a name="line-19477"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __tensor_name__: Must have a single element. The name of the tensor to be</span>
<a name="line-19478"></a>                                                   <span class='hs-comment'>-- restored.</span>
<a name="line-19479"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dt</span> <span class='hs-comment'>-- ^ __tensor__: The restored tensor.</span>
<a name="line-19480"></a><span class='hs-definition'>restore</span> <span class='hs-varid'>file_pattern</span> <span class='hs-varid'>tensor_name</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19481"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Restore"</span>
<a name="line-19482"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dt"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dt</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19483"></a>        <span class='hs-varid'>file_pattern</span> <span class='hs-varid'>tensor_name</span>
<a name="line-19484"></a><span class='hs-comment'>{-
<a name="line-19485"></a>attr {
<a name="line-19486"></a>  description: "The type of the tensor to be restored."
<a name="line-19487"></a>  name: "dt"
<a name="line-19488"></a>  type: "type"
<a name="line-19489"></a>}
<a name="line-19490"></a>attr {
<a name="line-19491"></a>  default_value { i: -1 }
<a name="line-19492"></a>  description: "Index of file to open first if multiple files match\n`file_pattern`."
<a name="line-19493"></a>  name: "preferred_shard"
<a name="line-19494"></a>  type: "int"
<a name="line-19495"></a>}
<a name="line-19496"></a>input_arg {
<a name="line-19497"></a>  description: "Must have a single element. The pattern of the files from\nwhich we read the tensor."
<a name="line-19498"></a>  name: "file_pattern"
<a name="line-19499"></a>  type: DT_STRING
<a name="line-19500"></a>}
<a name="line-19501"></a>input_arg {
<a name="line-19502"></a>  description: "Must have a single element. The name of the tensor to be\nrestored."
<a name="line-19503"></a>  name: "tensor_name"
<a name="line-19504"></a>  type: DT_STRING
<a name="line-19505"></a>}
<a name="line-19506"></a>output_arg {
<a name="line-19507"></a>  description: "The restored tensor." name: "tensor" type_attr: "dt"
<a name="line-19508"></a>}
<a name="line-19509"></a>-}</span>
<a name="line-19510"></a>
<a name="line-19511"></a><span class='hs-comment'>-- | Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`</span>
<a name="line-19512"></a>
<a name="line-19513"></a><a name="quantizedReluX"></a><span class='hs-definition'>quantizedReluX</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>tinput</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-19514"></a>                                                        <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-19515"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-19516"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-19517"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-19518"></a>                                                        <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-19519"></a>                                                        <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-19520"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-19521"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-19522"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19523"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __features__</span>
<a name="line-19524"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_value__</span>
<a name="line-19525"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_features__: The float value that the lowest quantized value represents.</span>
<a name="line-19526"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_features__: The float value that the highest quantized value represents.</span>
<a name="line-19527"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-19528"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-19529"></a>                  <span class='hs-comment'>-- ^ (__activations__, __min_activations__, __max_activations__)</span>
<a name="line-19530"></a>                  <span class='hs-comment'>--</span>
<a name="line-19531"></a>                  <span class='hs-comment'>-- * __activations__: Has the same output shape as "features".</span>
<a name="line-19532"></a>                  <span class='hs-comment'>--</span>
<a name="line-19533"></a>                  <span class='hs-comment'>-- * __min_activations__: The float value that the lowest quantized value represents.</span>
<a name="line-19534"></a>                  <span class='hs-comment'>--</span>
<a name="line-19535"></a>                  <span class='hs-comment'>-- * __max_activations__: The float value that the highest quantized value represents.</span>
<a name="line-19536"></a><span class='hs-definition'>quantizedReluX</span> <span class='hs-varid'>features</span> <span class='hs-varid'>max_value</span> <span class='hs-varid'>min_features</span> <span class='hs-varid'>max_features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19537"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedReluX"</span>
<a name="line-19538"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tinput"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>)</span>
<a name="line-19539"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19540"></a>        <span class='hs-varid'>features</span> <span class='hs-varid'>max_value</span> <span class='hs-varid'>min_features</span> <span class='hs-varid'>max_features</span>
<a name="line-19541"></a><span class='hs-comment'>{-
<a name="line-19542"></a>attr {
<a name="line-19543"></a>  allowed_values {
<a name="line-19544"></a>    list {
<a name="line-19545"></a>      type: DT_QINT8
<a name="line-19546"></a>      type: DT_QUINT8
<a name="line-19547"></a>      type: DT_QINT16
<a name="line-19548"></a>      type: DT_QUINT16
<a name="line-19549"></a>      type: DT_QINT32
<a name="line-19550"></a>    }
<a name="line-19551"></a>  }
<a name="line-19552"></a>  name: "Tinput"
<a name="line-19553"></a>  type: "type"
<a name="line-19554"></a>}
<a name="line-19555"></a>attr {
<a name="line-19556"></a>  allowed_values {
<a name="line-19557"></a>    list {
<a name="line-19558"></a>      type: DT_QINT8
<a name="line-19559"></a>      type: DT_QUINT8
<a name="line-19560"></a>      type: DT_QINT16
<a name="line-19561"></a>      type: DT_QUINT16
<a name="line-19562"></a>      type: DT_QINT32
<a name="line-19563"></a>    }
<a name="line-19564"></a>  }
<a name="line-19565"></a>  default_value { type: DT_QUINT8 }
<a name="line-19566"></a>  name: "out_type"
<a name="line-19567"></a>  type: "type"
<a name="line-19568"></a>}
<a name="line-19569"></a>input_arg { name: "features" type_attr: "Tinput" }
<a name="line-19570"></a>input_arg { name: "max_value" type: DT_FLOAT }
<a name="line-19571"></a>input_arg {
<a name="line-19572"></a>  description: "The float value that the lowest quantized value represents."
<a name="line-19573"></a>  name: "min_features"
<a name="line-19574"></a>  type: DT_FLOAT
<a name="line-19575"></a>}
<a name="line-19576"></a>input_arg {
<a name="line-19577"></a>  description: "The float value that the highest quantized value represents."
<a name="line-19578"></a>  name: "max_features"
<a name="line-19579"></a>  type: DT_FLOAT
<a name="line-19580"></a>}
<a name="line-19581"></a>output_arg {
<a name="line-19582"></a>  description: "Has the same output shape as \"features\"."
<a name="line-19583"></a>  name: "activations"
<a name="line-19584"></a>  type_attr: "out_type"
<a name="line-19585"></a>}
<a name="line-19586"></a>output_arg {
<a name="line-19587"></a>  description: "The float value that the lowest quantized value represents."
<a name="line-19588"></a>  name: "min_activations"
<a name="line-19589"></a>  type: DT_FLOAT
<a name="line-19590"></a>}
<a name="line-19591"></a>output_arg {
<a name="line-19592"></a>  description: "The float value that the highest quantized value represents."
<a name="line-19593"></a>  name: "max_activations"
<a name="line-19594"></a>  type: DT_FLOAT
<a name="line-19595"></a>}
<a name="line-19596"></a>-}</span>
<a name="line-19597"></a>
<a name="line-19598"></a><a name="accumulatorTakeGradient"></a><span class='hs-comment'>-- | Extracts the average gradient in the given ConditionalAccumulator, provided</span>
<a name="line-19599"></a><span class='hs-comment'>--</span>
<a name="line-19600"></a><span class='hs-comment'>-- that sufficient (i.e., more than num_required) gradients have been accumulated.</span>
<a name="line-19601"></a><span class='hs-comment'>-- The op blocks until sufficient gradients have been accumulated.</span>
<a name="line-19602"></a><span class='hs-comment'>-- If the accumulator has already aggregated more than num_required gradients, it</span>
<a name="line-19603"></a><span class='hs-comment'>-- returns the average of the accumulated gradients.</span>
<a name="line-19604"></a><span class='hs-comment'>-- Also automatically increments the recorded global_step in the accumulator by 1,</span>
<a name="line-19605"></a><span class='hs-comment'>-- and resets the aggregate to 0.</span>
<a name="line-19606"></a><span class='hs-definition'>accumulatorTakeGradient</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-19607"></a>                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-19608"></a>                                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-19609"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-19610"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-19611"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-19612"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-19613"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-19614"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-19615"></a>                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19616"></a>                           <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to an accumulator.</span>
<a name="line-19617"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __num_required__: Number of gradients required before we return an aggregate.</span>
<a name="line-19618"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __average__: The average of the accumulated gradients.</span>
<a name="line-19619"></a><span class='hs-definition'>accumulatorTakeGradient</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>num_required</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19620"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AccumulatorTakeGradient"</span>
<a name="line-19621"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19622"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>num_required</span>
<a name="line-19623"></a><span class='hs-comment'>{-
<a name="line-19624"></a>attr {
<a name="line-19625"></a>  allowed_values {
<a name="line-19626"></a>    list {
<a name="line-19627"></a>      type: DT_FLOAT
<a name="line-19628"></a>      type: DT_DOUBLE
<a name="line-19629"></a>      type: DT_INT64
<a name="line-19630"></a>      type: DT_INT32
<a name="line-19631"></a>      type: DT_UINT8
<a name="line-19632"></a>      type: DT_UINT16
<a name="line-19633"></a>      type: DT_INT16
<a name="line-19634"></a>      type: DT_INT8
<a name="line-19635"></a>      type: DT_COMPLEX64
<a name="line-19636"></a>      type: DT_COMPLEX128
<a name="line-19637"></a>      type: DT_QINT8
<a name="line-19638"></a>      type: DT_QUINT8
<a name="line-19639"></a>      type: DT_QINT32
<a name="line-19640"></a>      type: DT_HALF
<a name="line-19641"></a>    }
<a name="line-19642"></a>  }
<a name="line-19643"></a>  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
<a name="line-19644"></a>  name: "dtype"
<a name="line-19645"></a>  type: "type"
<a name="line-19646"></a>}
<a name="line-19647"></a>input_arg {
<a name="line-19648"></a>  description: "The handle to an accumulator."
<a name="line-19649"></a>  is_ref: true
<a name="line-19650"></a>  name: "handle"
<a name="line-19651"></a>  type: DT_STRING
<a name="line-19652"></a>}
<a name="line-19653"></a>input_arg {
<a name="line-19654"></a>  description: "Number of gradients required before we return an aggregate."
<a name="line-19655"></a>  name: "num_required"
<a name="line-19656"></a>  type: DT_INT32
<a name="line-19657"></a>}
<a name="line-19658"></a>output_arg {
<a name="line-19659"></a>  description: "The average of the accumulated gradients."
<a name="line-19660"></a>  name: "average"
<a name="line-19661"></a>  type_attr: "dtype"
<a name="line-19662"></a>}
<a name="line-19663"></a>-}</span>
<a name="line-19664"></a>
<a name="line-19665"></a><a name="floorMod"></a><span class='hs-comment'>-- | Returns element-wise remainder of division. When `x &lt; 0` xor `y &lt; 0` is</span>
<a name="line-19666"></a><span class='hs-comment'>--</span>
<a name="line-19667"></a><span class='hs-comment'>-- true, this follows Python semantics in that the result here is consistent</span>
<a name="line-19668"></a><span class='hs-comment'>-- with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.</span>
<a name="line-19669"></a><span class='hs-comment'>-- </span>
<a name="line-19670"></a><span class='hs-comment'>-- *NOTE*: `FloorMod` supports broadcasting. More about broadcasting</span>
<a name="line-19671"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-19672"></a><span class='hs-definition'>floorMod</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-19673"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-19674"></a>                                                    <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19675"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-19676"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-19677"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-19678"></a><span class='hs-definition'>floorMod</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19679"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FloorMod"</span>
<a name="line-19680"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19681"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-19682"></a><span class='hs-comment'>{-
<a name="line-19683"></a>attr {
<a name="line-19684"></a>  allowed_values {
<a name="line-19685"></a>    list {
<a name="line-19686"></a>      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
<a name="line-19687"></a>    }
<a name="line-19688"></a>  }
<a name="line-19689"></a>  name: "T"
<a name="line-19690"></a>  type: "type"
<a name="line-19691"></a>}
<a name="line-19692"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-19693"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-19694"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-19695"></a>-}</span>
<a name="line-19696"></a>
<a name="line-19697"></a><a name="matchingFiles"></a><span class='hs-comment'>-- | Returns the set of files matching a pattern.</span>
<a name="line-19698"></a><span class='hs-comment'>--</span>
<a name="line-19699"></a><span class='hs-comment'>-- Note that this routine only supports wildcard characters in the</span>
<a name="line-19700"></a><span class='hs-comment'>-- basename portion of the pattern, not in the directory portion.</span>
<a name="line-19701"></a><span class='hs-definition'>matchingFiles</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __pattern__: A (scalar) shell wildcard pattern.</span>
<a name="line-19702"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __filenames__: A vector of matching filenames.</span>
<a name="line-19703"></a><span class='hs-definition'>matchingFiles</span> <span class='hs-varid'>pattern</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19704"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatchingFiles"</span><span class='hs-layout'>)</span>
<a name="line-19705"></a>        <span class='hs-varid'>pattern</span>
<a name="line-19706"></a><span class='hs-comment'>{-
<a name="line-19707"></a>input_arg {
<a name="line-19708"></a>  description: "A (scalar) shell wildcard pattern."
<a name="line-19709"></a>  name: "pattern"
<a name="line-19710"></a>  type: DT_STRING
<a name="line-19711"></a>}
<a name="line-19712"></a>output_arg {
<a name="line-19713"></a>  description: "A vector of matching filenames."
<a name="line-19714"></a>  name: "filenames"
<a name="line-19715"></a>  type: DT_STRING
<a name="line-19716"></a>}
<a name="line-19717"></a>-}</span>
<a name="line-19718"></a>
<a name="line-19719"></a><span class='hs-comment'>-- | Performs max pooling on the input.</span>
<a name="line-19720"></a>
<a name="line-19721"></a><a name="maxPool"></a><span class='hs-definition'>maxPool</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19722"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D input to pool over.</span>
<a name="line-19723"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The max pooled output tensor.</span>
<a name="line-19724"></a><span class='hs-definition'>maxPool</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19725"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MaxPool"</span>
<a name="line-19726"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19727"></a>        <span class='hs-varid'>input</span>
<a name="line-19728"></a><span class='hs-comment'>{-
<a name="line-19729"></a>attr {
<a name="line-19730"></a>  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
<a name="line-19731"></a>  default_value { type: DT_FLOAT }
<a name="line-19732"></a>  name: "T"
<a name="line-19733"></a>  type: "type"
<a name="line-19734"></a>}
<a name="line-19735"></a>attr {
<a name="line-19736"></a>  description: "The size of the window for each dimension of the input tensor."
<a name="line-19737"></a>  has_minimum: true
<a name="line-19738"></a>  minimum: 4
<a name="line-19739"></a>  name: "ksize"
<a name="line-19740"></a>  type: "list(int)"
<a name="line-19741"></a>}
<a name="line-19742"></a>attr {
<a name="line-19743"></a>  description: "The stride of the sliding window for each dimension of the\ninput tensor."
<a name="line-19744"></a>  has_minimum: true
<a name="line-19745"></a>  minimum: 4
<a name="line-19746"></a>  name: "strides"
<a name="line-19747"></a>  type: "list(int)"
<a name="line-19748"></a>}
<a name="line-19749"></a>attr {
<a name="line-19750"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-19751"></a>  description: "The type of padding algorithm to use."
<a name="line-19752"></a>  name: "padding"
<a name="line-19753"></a>  type: "string"
<a name="line-19754"></a>}
<a name="line-19755"></a>attr {
<a name="line-19756"></a>  allowed_values { list { s: "NHWC" s: "NCHW" } }
<a name="line-19757"></a>  default_value { s: "NHWC" }
<a name="line-19758"></a>  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
<a name="line-19759"></a>  name: "data_format"
<a name="line-19760"></a>  type: "string"
<a name="line-19761"></a>}
<a name="line-19762"></a>input_arg {
<a name="line-19763"></a>  description: "4-D input to pool over." name: "input" type_attr: "T"
<a name="line-19764"></a>}
<a name="line-19765"></a>output_arg {
<a name="line-19766"></a>  description: "The max pooled output tensor."
<a name="line-19767"></a>  name: "output"
<a name="line-19768"></a>  type_attr: "T"
<a name="line-19769"></a>}
<a name="line-19770"></a>-}</span>
<a name="line-19771"></a>
<a name="line-19772"></a><a name="computeAccidentalHits"></a><span class='hs-comment'>-- | Computes the ids of the positions in sampled_candidates that match true_labels.</span>
<a name="line-19773"></a><span class='hs-comment'>--</span>
<a name="line-19774"></a><span class='hs-comment'>-- When doing log-odds NCE, the result of this op should be passed through a</span>
<a name="line-19775"></a><span class='hs-comment'>-- SparseToDense op, then added to the logits of the sampled candidates. This has</span>
<a name="line-19776"></a><span class='hs-comment'>-- the effect of 'removing' the sampled labels that match the true labels by</span>
<a name="line-19777"></a><span class='hs-comment'>-- making the classifier sure that they are sampled labels.</span>
<a name="line-19778"></a><span class='hs-definition'>computeAccidentalHits</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_true__: Number of true labels per context.</span>
<a name="line-19779"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __true_classes__: The true_classes output of UnpackSparseLabels.</span>
<a name="line-19780"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sampled_candidates__: The sampled_candidates output of CandidateSampler.</span>
<a name="line-19781"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-19782"></a>                             <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-19783"></a>                         <span class='hs-comment'>-- ^ (__indices__, __ids__, __weights__)</span>
<a name="line-19784"></a>                         <span class='hs-comment'>--</span>
<a name="line-19785"></a>                         <span class='hs-comment'>-- * __indices__: A vector of indices corresponding to rows of true_candidates.</span>
<a name="line-19786"></a>                         <span class='hs-comment'>--</span>
<a name="line-19787"></a>                         <span class='hs-comment'>-- * __ids__: A vector of IDs of positions in sampled_candidates that match a true_label</span>
<a name="line-19788"></a>                         <span class='hs-comment'>-- for the row with the corresponding index in indices.</span>
<a name="line-19789"></a>                         <span class='hs-comment'>--</span>
<a name="line-19790"></a>                         <span class='hs-comment'>-- * __weights__: A vector of the same length as indices and ids, in which each element</span>
<a name="line-19791"></a>                         <span class='hs-comment'>-- is -FLOAT_MAX.</span>
<a name="line-19792"></a><span class='hs-definition'>computeAccidentalHits</span> <span class='hs-varid'>num_true</span> <span class='hs-varid'>true_classes</span>
<a name="line-19793"></a>                      <span class='hs-varid'>sampled_candidates</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19794"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ComputeAccidentalHits"</span>
<a name="line-19795"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_true"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_true</span><span class='hs-layout'>)</span>
<a name="line-19796"></a>        <span class='hs-varid'>true_classes</span> <span class='hs-varid'>sampled_candidates</span>
<a name="line-19797"></a><span class='hs-comment'>{-
<a name="line-19798"></a>attr {
<a name="line-19799"></a>  description: "Number of true labels per context."
<a name="line-19800"></a>  name: "num_true"
<a name="line-19801"></a>  type: "int"
<a name="line-19802"></a>}
<a name="line-19803"></a>attr {
<a name="line-19804"></a>  default_value { i: 0 }
<a name="line-19805"></a>  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-19806"></a>  name: "seed"
<a name="line-19807"></a>  type: "int"
<a name="line-19808"></a>}
<a name="line-19809"></a>attr {
<a name="line-19810"></a>  default_value { i: 0 }
<a name="line-19811"></a>  description: "An second seed to avoid seed collision."
<a name="line-19812"></a>  name: "seed2"
<a name="line-19813"></a>  type: "int"
<a name="line-19814"></a>}
<a name="line-19815"></a>input_arg {
<a name="line-19816"></a>  description: "The true_classes output of UnpackSparseLabels."
<a name="line-19817"></a>  name: "true_classes"
<a name="line-19818"></a>  type: DT_INT64
<a name="line-19819"></a>}
<a name="line-19820"></a>input_arg {
<a name="line-19821"></a>  description: "The sampled_candidates output of CandidateSampler."
<a name="line-19822"></a>  name: "sampled_candidates"
<a name="line-19823"></a>  type: DT_INT64
<a name="line-19824"></a>}
<a name="line-19825"></a>output_arg {
<a name="line-19826"></a>  description: "A vector of indices corresponding to rows of true_candidates."
<a name="line-19827"></a>  name: "indices"
<a name="line-19828"></a>  type: DT_INT32
<a name="line-19829"></a>}
<a name="line-19830"></a>output_arg {
<a name="line-19831"></a>  description: "A vector of IDs of positions in sampled_candidates that match a true_label\nfor the row with the corresponding index in indices."
<a name="line-19832"></a>  name: "ids"
<a name="line-19833"></a>  type: DT_INT64
<a name="line-19834"></a>}
<a name="line-19835"></a>output_arg {
<a name="line-19836"></a>  description: "A vector of the same length as indices and ids, in which each element\nis -FLOAT_MAX."
<a name="line-19837"></a>  name: "weights"
<a name="line-19838"></a>  type: DT_FLOAT
<a name="line-19839"></a>}
<a name="line-19840"></a>-}</span>
<a name="line-19841"></a>
<a name="line-19842"></a><a name="deserializeManySparse"></a><span class='hs-comment'>-- | Deserialize and concatenate `SparseTensors` from a serialized minibatch.</span>
<a name="line-19843"></a><span class='hs-comment'>--</span>
<a name="line-19844"></a><span class='hs-comment'>-- The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where</span>
<a name="line-19845"></a><span class='hs-comment'>-- `N` is the minibatch size and the rows correspond to packed outputs of</span>
<a name="line-19846"></a><span class='hs-comment'>-- `SerializeSparse`.  The ranks of the original `SparseTensor` objects</span>
<a name="line-19847"></a><span class='hs-comment'>-- must all match.  When the final `SparseTensor` is created, it has rank one</span>
<a name="line-19848"></a><span class='hs-comment'>-- higher than the ranks of the incoming `SparseTensor` objects</span>
<a name="line-19849"></a><span class='hs-comment'>-- (they have been concatenated along a new row dimension).</span>
<a name="line-19850"></a><span class='hs-comment'>-- </span>
<a name="line-19851"></a><span class='hs-comment'>-- The output `SparseTensor` object's shape values for all dimensions but the</span>
<a name="line-19852"></a><span class='hs-comment'>-- first are the max across the input `SparseTensor` objects' shape values</span>
<a name="line-19853"></a><span class='hs-comment'>-- for the corresponding dimensions.  Its first shape value is `N`, the minibatch</span>
<a name="line-19854"></a><span class='hs-comment'>-- size.</span>
<a name="line-19855"></a><span class='hs-comment'>-- </span>
<a name="line-19856"></a><span class='hs-comment'>-- The input `SparseTensor` objects' indices are assumed ordered in</span>
<a name="line-19857"></a><span class='hs-comment'>-- standard lexicographic order.  If this is not the case, after this</span>
<a name="line-19858"></a><span class='hs-comment'>-- step run `SparseReorder` to restore index ordering.</span>
<a name="line-19859"></a><span class='hs-comment'>-- </span>
<a name="line-19860"></a><span class='hs-comment'>-- For example, if the serialized input is a `[2 x 3]` matrix representing two</span>
<a name="line-19861"></a><span class='hs-comment'>-- original `SparseTensor` objects:</span>
<a name="line-19862"></a><span class='hs-comment'>-- </span>
<a name="line-19863"></a><span class='hs-comment'>--     index = [ 0]</span>
<a name="line-19864"></a><span class='hs-comment'>--             [10]</span>
<a name="line-19865"></a><span class='hs-comment'>--             [20]</span>
<a name="line-19866"></a><span class='hs-comment'>--     values = [1, 2, 3]</span>
<a name="line-19867"></a><span class='hs-comment'>--     shape = [50]</span>
<a name="line-19868"></a><span class='hs-comment'>-- </span>
<a name="line-19869"></a><span class='hs-comment'>-- and</span>
<a name="line-19870"></a><span class='hs-comment'>-- </span>
<a name="line-19871"></a><span class='hs-comment'>--     index = [ 2]</span>
<a name="line-19872"></a><span class='hs-comment'>--             [10]</span>
<a name="line-19873"></a><span class='hs-comment'>--     values = [4, 5]</span>
<a name="line-19874"></a><span class='hs-comment'>--     shape = [30]</span>
<a name="line-19875"></a><span class='hs-comment'>-- </span>
<a name="line-19876"></a><span class='hs-comment'>-- then the final deserialized `SparseTensor` will be:</span>
<a name="line-19877"></a><span class='hs-comment'>-- </span>
<a name="line-19878"></a><span class='hs-comment'>--     index = [0  0]</span>
<a name="line-19879"></a><span class='hs-comment'>--             [0 10]</span>
<a name="line-19880"></a><span class='hs-comment'>--             [0 20]</span>
<a name="line-19881"></a><span class='hs-comment'>--             [1  2]</span>
<a name="line-19882"></a><span class='hs-comment'>--             [1 10]</span>
<a name="line-19883"></a><span class='hs-comment'>--     values = [1, 2, 3, 4, 5]</span>
<a name="line-19884"></a><span class='hs-comment'>--     shape = [2 50]</span>
<a name="line-19885"></a><span class='hs-definition'>deserializeManySparse</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19886"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __serialized_sparse__: 2-D, The `N` serialized `SparseTensor` objects.</span>
<a name="line-19887"></a>                                                              <span class='hs-comment'>-- Must have 3 columns.</span>
<a name="line-19888"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-19889"></a>                             <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span>
<a name="line-19890"></a>                         <span class='hs-comment'>-- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__)</span>
<a name="line-19891"></a>                         <span class='hs-comment'>--</span>
<a name="line-19892"></a>                         <span class='hs-comment'>-- * __sparse_indices__</span>
<a name="line-19893"></a>                         <span class='hs-comment'>--</span>
<a name="line-19894"></a>                         <span class='hs-comment'>-- * __sparse_values__</span>
<a name="line-19895"></a>                         <span class='hs-comment'>--</span>
<a name="line-19896"></a>                         <span class='hs-comment'>-- * __sparse_shape__</span>
<a name="line-19897"></a><span class='hs-definition'>deserializeManySparse</span> <span class='hs-varid'>serialized_sparse</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19898"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DeserializeManySparse"</span>
<a name="line-19899"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19900"></a>        <span class='hs-varid'>serialized_sparse</span>
<a name="line-19901"></a><span class='hs-comment'>{-
<a name="line-19902"></a>attr {
<a name="line-19903"></a>  description: "The `dtype` of the serialized `SparseTensor` objects."
<a name="line-19904"></a>  name: "dtype"
<a name="line-19905"></a>  type: "type"
<a name="line-19906"></a>}
<a name="line-19907"></a>input_arg {
<a name="line-19908"></a>  description: "2-D, The `N` serialized `SparseTensor` objects.\nMust have 3 columns."
<a name="line-19909"></a>  name: "serialized_sparse"
<a name="line-19910"></a>  type: DT_STRING
<a name="line-19911"></a>}
<a name="line-19912"></a>output_arg { name: "sparse_indices" type: DT_INT64 }
<a name="line-19913"></a>output_arg { name: "sparse_values" type_attr: "dtype" }
<a name="line-19914"></a>output_arg { name: "sparse_shape" type: DT_INT64 }
<a name="line-19915"></a>-}</span>
<a name="line-19916"></a>
<a name="line-19917"></a><a name="cropAndResize"></a><span class='hs-comment'>-- | Extracts crops from the input image tensor and bilinearly resizes them (possibly</span>
<a name="line-19918"></a><span class='hs-comment'>--</span>
<a name="line-19919"></a><span class='hs-comment'>-- with aspect ratio change) to a common output size specified by `crop_size`. This</span>
<a name="line-19920"></a><span class='hs-comment'>-- is more general than the `crop_to_bounding_box` op which extracts a fixed size</span>
<a name="line-19921"></a><span class='hs-comment'>-- slice from the input image and does not allow resizing or aspect ratio change.</span>
<a name="line-19922"></a><span class='hs-comment'>-- </span>
<a name="line-19923"></a><span class='hs-comment'>-- Returns a tensor with `crops` from the input `image` at positions defined at the</span>
<a name="line-19924"></a><span class='hs-comment'>-- bounding box locations in `boxes`. The cropped boxes are all resized (with</span>
<a name="line-19925"></a><span class='hs-comment'>-- bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The</span>
<a name="line-19926"></a><span class='hs-comment'>-- result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.</span>
<a name="line-19927"></a><span class='hs-definition'>cropAndResize</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-19928"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-19929"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-19930"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-19931"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-19932"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-19933"></a>                                                               <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-19934"></a>                                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-19935"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.</span>
<a name="line-19936"></a>                             <span class='hs-comment'>-- Both `image_height` and `image_width` need to be positive.</span>
<a name="line-19937"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor</span>
<a name="line-19938"></a>                                    <span class='hs-comment'>-- specifies the coordinates of a box in the `box_ind[i]` image and is specified</span>
<a name="line-19939"></a>                                    <span class='hs-comment'>-- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of</span>
<a name="line-19940"></a>                                    <span class='hs-comment'>-- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the</span>
<a name="line-19941"></a>                                    <span class='hs-comment'>-- `[0, 1]` interval of normalized image height is mapped to</span>
<a name="line-19942"></a>                                    <span class='hs-comment'>-- `[0, image_height - 1] in image height coordinates. We do allow y1 &gt; y2, in</span>
<a name="line-19943"></a>                                    <span class='hs-comment'>-- which case the sampled crop is an up-down flipped version of the original</span>
<a name="line-19944"></a>                                    <span class='hs-comment'>-- image. The width dimension is treated similarly. Normalized coordinates</span>
<a name="line-19945"></a>                                    <span class='hs-comment'>-- outside the `[0, 1]` range are allowed, in which case we use</span>
<a name="line-19946"></a>                                    <span class='hs-comment'>-- `extrapolation_value` to extrapolate the input image values.</span>
<a name="line-19947"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.</span>
<a name="line-19948"></a>                                             <span class='hs-comment'>-- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.</span>
<a name="line-19949"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __crop_size__: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All</span>
<a name="line-19950"></a>                                             <span class='hs-comment'>-- cropped image patches are resized to this size. The aspect ratio of the image</span>
<a name="line-19951"></a>                                             <span class='hs-comment'>-- content is not preserved. Both `crop_height` and `crop_width` need to be</span>
<a name="line-19952"></a>                                             <span class='hs-comment'>-- positive.</span>
<a name="line-19953"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __crops__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.</span>
<a name="line-19954"></a><span class='hs-definition'>cropAndResize</span> <span class='hs-varid'>image</span> <span class='hs-varid'>boxes</span> <span class='hs-varid'>box_ind</span> <span class='hs-varid'>crop_size</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-19955"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CropAndResize"</span>
<a name="line-19956"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-19957"></a>        <span class='hs-varid'>image</span> <span class='hs-varid'>boxes</span> <span class='hs-varid'>box_ind</span> <span class='hs-varid'>crop_size</span>
<a name="line-19958"></a><span class='hs-comment'>{-
<a name="line-19959"></a>attr {
<a name="line-19960"></a>  allowed_values {
<a name="line-19961"></a>    list {
<a name="line-19962"></a>      type: DT_UINT8
<a name="line-19963"></a>      type: DT_INT8
<a name="line-19964"></a>      type: DT_INT16
<a name="line-19965"></a>      type: DT_INT32
<a name="line-19966"></a>      type: DT_INT64
<a name="line-19967"></a>      type: DT_HALF
<a name="line-19968"></a>      type: DT_FLOAT
<a name="line-19969"></a>      type: DT_DOUBLE
<a name="line-19970"></a>    }
<a name="line-19971"></a>  }
<a name="line-19972"></a>  name: "T"
<a name="line-19973"></a>  type: "type"
<a name="line-19974"></a>}
<a name="line-19975"></a>attr {
<a name="line-19976"></a>  allowed_values { list { s: "bilinear" } }
<a name="line-19977"></a>  default_value { s: "bilinear" }
<a name="line-19978"></a>  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
<a name="line-19979"></a>  name: "method"
<a name="line-19980"></a>  type: "string"
<a name="line-19981"></a>}
<a name="line-19982"></a>attr {
<a name="line-19983"></a>  default_value { f: 0.0 }
<a name="line-19984"></a>  description: "Value used for extrapolation, when applicable."
<a name="line-19985"></a>  name: "extrapolation_value"
<a name="line-19986"></a>  type: "float"
<a name="line-19987"></a>}
<a name="line-19988"></a>input_arg {
<a name="line-19989"></a>  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive."
<a name="line-19990"></a>  name: "image"
<a name="line-19991"></a>  type_attr: "T"
<a name="line-19992"></a>}
<a name="line-19993"></a>input_arg {
<a name="line-19994"></a>  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 &gt; y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
<a name="line-19995"></a>  name: "boxes"
<a name="line-19996"></a>  type: DT_FLOAT
<a name="line-19997"></a>}
<a name="line-19998"></a>input_arg {
<a name="line-19999"></a>  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
<a name="line-20000"></a>  name: "box_ind"
<a name="line-20001"></a>  type: DT_INT32
<a name="line-20002"></a>}
<a name="line-20003"></a>input_arg {
<a name="line-20004"></a>  description: "A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All\ncropped image patches are resized to this size. The aspect ratio of the image\ncontent is not preserved. Both `crop_height` and `crop_width` need to be\npositive."
<a name="line-20005"></a>  name: "crop_size"
<a name="line-20006"></a>  type: DT_INT32
<a name="line-20007"></a>}
<a name="line-20008"></a>output_arg {
<a name="line-20009"></a>  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
<a name="line-20010"></a>  name: "crops"
<a name="line-20011"></a>  type: DT_FLOAT
<a name="line-20012"></a>}
<a name="line-20013"></a>-}</span>
<a name="line-20014"></a>
<a name="line-20015"></a><a name="scatterUpdate"></a><span class='hs-comment'>-- | Applies sparse updates to a variable reference.</span>
<a name="line-20016"></a><span class='hs-comment'>--</span>
<a name="line-20017"></a><span class='hs-comment'>-- This operation computes</span>
<a name="line-20018"></a><span class='hs-comment'>-- </span>
<a name="line-20019"></a><span class='hs-comment'>--     # Scalar indices</span>
<a name="line-20020"></a><span class='hs-comment'>--     ref[indices, ...] = updates[...]</span>
<a name="line-20021"></a><span class='hs-comment'>-- </span>
<a name="line-20022"></a><span class='hs-comment'>--     # Vector indices (for each i)</span>
<a name="line-20023"></a><span class='hs-comment'>--     ref[indices[i], ...] = updates[i, ...]</span>
<a name="line-20024"></a><span class='hs-comment'>-- </span>
<a name="line-20025"></a><span class='hs-comment'>--     # High rank indices (for each i, ..., j)</span>
<a name="line-20026"></a><span class='hs-comment'>--     ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]</span>
<a name="line-20027"></a><span class='hs-comment'>-- </span>
<a name="line-20028"></a><span class='hs-comment'>-- This operation outputs `ref` after the update is done.</span>
<a name="line-20029"></a><span class='hs-comment'>-- This makes it easier to chain operations that need to use the reset value.</span>
<a name="line-20030"></a><span class='hs-comment'>-- </span>
<a name="line-20031"></a><span class='hs-comment'>-- If values in `ref` is to be updated more than once, because there are</span>
<a name="line-20032"></a><span class='hs-comment'>-- duplicate entires in `indices`, the order at which the updates happen</span>
<a name="line-20033"></a><span class='hs-comment'>-- for each value is undefined.</span>
<a name="line-20034"></a><span class='hs-comment'>-- </span>
<a name="line-20035"></a><span class='hs-comment'>-- Requires `updates.shape = indices.shape + ref.shape[1:]`.</span>
<a name="line-20036"></a><span class='hs-comment'>-- </span>
<a name="line-20037"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-20038"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/ScatterUpdate.png" alt&gt;</span>
<a name="line-20039"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-20040"></a><span class='hs-definition'>scatterUpdate</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-20041"></a>                                            <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-20042"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20043"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: Should be from a `Variable` node.</span>
<a name="line-20044"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A tensor of indices into the first dimension of `ref`.</span>
<a name="line-20045"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __updates__: A tensor of updated values to store in `ref`.</span>
<a name="line-20046"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want</span>
<a name="line-20047"></a>                 <span class='hs-comment'>-- to use the updated values after the update is done.</span>
<a name="line-20048"></a><span class='hs-definition'>scatterUpdate</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20049"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ScatterUpdate"</span>
<a name="line-20050"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-20051"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20052"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span>
<a name="line-20053"></a><span class='hs-comment'>{-
<a name="line-20054"></a>attr { name: "T" type: "type" }
<a name="line-20055"></a>attr {
<a name="line-20056"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-20057"></a>  name: "Tindices"
<a name="line-20058"></a>  type: "type"
<a name="line-20059"></a>}
<a name="line-20060"></a>attr {
<a name="line-20061"></a>  default_value { b: true }
<a name="line-20062"></a>  description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-20063"></a>  name: "use_locking"
<a name="line-20064"></a>  type: "bool"
<a name="line-20065"></a>}
<a name="line-20066"></a>input_arg {
<a name="line-20067"></a>  description: "Should be from a `Variable` node."
<a name="line-20068"></a>  is_ref: true
<a name="line-20069"></a>  name: "ref"
<a name="line-20070"></a>  type_attr: "T"
<a name="line-20071"></a>}
<a name="line-20072"></a>input_arg {
<a name="line-20073"></a>  description: "A tensor of indices into the first dimension of `ref`."
<a name="line-20074"></a>  name: "indices"
<a name="line-20075"></a>  type_attr: "Tindices"
<a name="line-20076"></a>}
<a name="line-20077"></a>input_arg {
<a name="line-20078"></a>  description: "A tensor of updated values to store in `ref`."
<a name="line-20079"></a>  name: "updates"
<a name="line-20080"></a>  type_attr: "T"
<a name="line-20081"></a>}
<a name="line-20082"></a>output_arg {
<a name="line-20083"></a>  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
<a name="line-20084"></a>  is_ref: true
<a name="line-20085"></a>  name: "output_ref"
<a name="line-20086"></a>  type_attr: "T"
<a name="line-20087"></a>}
<a name="line-20088"></a>-}</span>
<a name="line-20089"></a>
<a name="line-20090"></a><a name="randomGamma"></a><span class='hs-comment'>-- | Outputs random values from the Gamma distribution(s) described by alpha.</span>
<a name="line-20091"></a><span class='hs-comment'>--</span>
<a name="line-20092"></a><span class='hs-comment'>-- This op uses the algorithm by Marsaglia et al. to acquire samples via</span>
<a name="line-20093"></a><span class='hs-comment'>-- transformation-rejection from pairs of uniform and normal random variables.</span>
<a name="line-20094"></a><span class='hs-comment'>-- See <a href="http://dl.acm.org/citation.cfm?id=358414">http://dl.acm.org/citation.cfm?id=358414</a></span>
<a name="line-20095"></a><span class='hs-definition'>randomGamma</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>s</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>s</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-20096"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>s</span><span class='hs-layout'>,</span>
<a name="line-20097"></a>                                   <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-20098"></a>                                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20099"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>s</span> <span class='hs-comment'>-- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each</span>
<a name="line-20100"></a>                           <span class='hs-comment'>-- distribution described by the shape parameters given in alpha.</span>
<a name="line-20101"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __alpha__: A tensor in which each scalar is a "shape" parameter describing the</span>
<a name="line-20102"></a>                              <span class='hs-comment'>-- associated gamma distribution.</span>
<a name="line-20103"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A tensor with shape `shape + shape(alpha)`. Each slice</span>
<a name="line-20104"></a>               <span class='hs-comment'>-- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for</span>
<a name="line-20105"></a>               <span class='hs-comment'>-- `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.</span>
<a name="line-20106"></a><span class='hs-definition'>randomGamma</span> <span class='hs-varid'>shape</span> <span class='hs-varid'>alpha</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20107"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RandomGamma"</span>
<a name="line-20108"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"S"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>s</span><span class='hs-layout'>)</span>
<a name="line-20109"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20110"></a>        <span class='hs-varid'>shape</span> <span class='hs-varid'>alpha</span>
<a name="line-20111"></a><span class='hs-comment'>{-
<a name="line-20112"></a>attr {
<a name="line-20113"></a>  default_value { i: 0 }
<a name="line-20114"></a>  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-20115"></a>  name: "seed"
<a name="line-20116"></a>  type: "int"
<a name="line-20117"></a>}
<a name="line-20118"></a>attr {
<a name="line-20119"></a>  default_value { i: 0 }
<a name="line-20120"></a>  description: "A second seed to avoid seed collision."
<a name="line-20121"></a>  name: "seed2"
<a name="line-20122"></a>  type: "int"
<a name="line-20123"></a>}
<a name="line-20124"></a>attr {
<a name="line-20125"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-20126"></a>  name: "S"
<a name="line-20127"></a>  type: "type"
<a name="line-20128"></a>}
<a name="line-20129"></a>attr {
<a name="line-20130"></a>  allowed_values {
<a name="line-20131"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-20132"></a>  }
<a name="line-20133"></a>  name: "T"
<a name="line-20134"></a>  type: "type"
<a name="line-20135"></a>}
<a name="line-20136"></a>input_arg {
<a name="line-20137"></a>  description: "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in alpha."
<a name="line-20138"></a>  name: "shape"
<a name="line-20139"></a>  type_attr: "S"
<a name="line-20140"></a>}
<a name="line-20141"></a>input_arg {
<a name="line-20142"></a>  description: "A tensor in which each scalar is a \"shape\" parameter describing the\nassociated gamma distribution."
<a name="line-20143"></a>  name: "alpha"
<a name="line-20144"></a>  type_attr: "T"
<a name="line-20145"></a>}
<a name="line-20146"></a>output_arg {
<a name="line-20147"></a>  description: "A tensor with shape `shape + shape(alpha)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha."
<a name="line-20148"></a>  name: "output"
<a name="line-20149"></a>  type_attr: "T"
<a name="line-20150"></a>}
<a name="line-20151"></a>-}</span>
<a name="line-20152"></a>
<a name="line-20153"></a><span class='hs-comment'>-- | </span>
<a name="line-20154"></a>
<a name="line-20155"></a><a name="batchMatrixSolve"></a><span class='hs-definition'>batchMatrixSolve</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20156"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __matrix__</span>
<a name="line-20157"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __rhs__</span>
<a name="line-20158"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-20159"></a><span class='hs-definition'>batchMatrixSolve</span> <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20160"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchMatrixSolve"</span>
<a name="line-20161"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20162"></a>        <span class='hs-varid'>matrix</span> <span class='hs-varid'>rhs</span>
<a name="line-20163"></a><span class='hs-comment'>{-
<a name="line-20164"></a>attr { default_value { b: false } name: "adjoint" type: "bool" }
<a name="line-20165"></a>attr {
<a name="line-20166"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-20167"></a>  name: "T"
<a name="line-20168"></a>  type: "type"
<a name="line-20169"></a>}
<a name="line-20170"></a>input_arg { name: "matrix" type_attr: "T" }
<a name="line-20171"></a>input_arg { name: "rhs" type_attr: "T" }
<a name="line-20172"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-20173"></a>-}</span>
<a name="line-20174"></a>
<a name="line-20175"></a><span class='hs-comment'>-- | </span>
<a name="line-20176"></a>
<a name="line-20177"></a><a name="batchMatrixBandPart"></a><span class='hs-definition'>batchMatrixBandPart</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20178"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-20179"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_lower__</span>
<a name="line-20180"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_upper__</span>
<a name="line-20181"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __band__</span>
<a name="line-20182"></a><span class='hs-definition'>batchMatrixBandPart</span> <span class='hs-varid'>input</span> <span class='hs-varid'>num_lower</span> <span class='hs-varid'>num_upper</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20183"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchMatrixBandPart"</span>
<a name="line-20184"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20185"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>num_lower</span> <span class='hs-varid'>num_upper</span>
<a name="line-20186"></a><span class='hs-comment'>{-
<a name="line-20187"></a>attr { name: "T" type: "type" }
<a name="line-20188"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-20189"></a>input_arg { name: "num_lower" type: DT_INT64 }
<a name="line-20190"></a>input_arg { name: "num_upper" type: DT_INT64 }
<a name="line-20191"></a>output_arg { name: "band" type_attr: "T" }
<a name="line-20192"></a>-}</span>
<a name="line-20193"></a>
<a name="line-20194"></a><span class='hs-comment'>-- | </span>
<a name="line-20195"></a>
<a name="line-20196"></a><a name="tensorArrayClose"></a><span class='hs-definition'>tensorArrayClose</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-20197"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-20198"></a><span class='hs-definition'>tensorArrayClose</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20199"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayClose"</span><span class='hs-layout'>)</span>
<a name="line-20200"></a>        <span class='hs-varid'>handle</span>
<a name="line-20201"></a><span class='hs-comment'>{-
<a name="line-20202"></a>input_arg { is_ref: true name: "handle" type: DT_STRING }
<a name="line-20203"></a>-}</span>
<a name="line-20204"></a>
<a name="line-20205"></a><a name="all"></a><span class='hs-comment'>-- | Computes the "logical and" of elements across dimensions of a tensor.</span>
<a name="line-20206"></a><span class='hs-comment'>--</span>
<a name="line-20207"></a><span class='hs-comment'>-- Reduces `input` along the dimensions given in `reduction_indices`. Unless</span>
<a name="line-20208"></a><span class='hs-comment'>-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in</span>
<a name="line-20209"></a><span class='hs-comment'>-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are</span>
<a name="line-20210"></a><span class='hs-comment'>-- retained with length 1.</span>
<a name="line-20211"></a><span class='hs-definition'>all</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-20212"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20213"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __input__: The tensor to reduce.</span>
<a name="line-20214"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __reduction_indices__: The dimensions to reduce.</span>
<a name="line-20215"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __output__: The reduced tensor.</span>
<a name="line-20216"></a><span class='hs-definition'>all</span> <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20217"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"All"</span>
<a name="line-20218"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20219"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span>
<a name="line-20220"></a><span class='hs-comment'>{-
<a name="line-20221"></a>attr {
<a name="line-20222"></a>  default_value { b: false }
<a name="line-20223"></a>  description: "If true, retain reduced dimensions with length 1."
<a name="line-20224"></a>  name: "keep_dims"
<a name="line-20225"></a>  type: "bool"
<a name="line-20226"></a>}
<a name="line-20227"></a>attr {
<a name="line-20228"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-20229"></a>  default_value { type: DT_INT32 }
<a name="line-20230"></a>  name: "Tidx"
<a name="line-20231"></a>  type: "type"
<a name="line-20232"></a>}
<a name="line-20233"></a>input_arg {
<a name="line-20234"></a>  description: "The tensor to reduce." name: "input" type: DT_BOOL
<a name="line-20235"></a>}
<a name="line-20236"></a>input_arg {
<a name="line-20237"></a>  description: "The dimensions to reduce."
<a name="line-20238"></a>  name: "reduction_indices"
<a name="line-20239"></a>  type_attr: "Tidx"
<a name="line-20240"></a>}
<a name="line-20241"></a>output_arg {
<a name="line-20242"></a>  description: "The reduced tensor." name: "output" type: DT_BOOL
<a name="line-20243"></a>}
<a name="line-20244"></a>-}</span>
<a name="line-20245"></a>
<a name="line-20246"></a><a name="readerNumRecordsProduced"></a><span class='hs-comment'>-- | Returns the number of records this Reader has produced.</span>
<a name="line-20247"></a><span class='hs-comment'>--</span>
<a name="line-20248"></a><span class='hs-comment'>-- This is the same as the number of ReaderRead executions that have</span>
<a name="line-20249"></a><span class='hs-comment'>-- succeeded.</span>
<a name="line-20250"></a><span class='hs-definition'>readerNumRecordsProduced</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __reader_handle__: Handle to a Reader.</span>
<a name="line-20251"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __records_produced__</span>
<a name="line-20252"></a><span class='hs-definition'>readerNumRecordsProduced</span> <span class='hs-varid'>reader_handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20253"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReaderNumRecordsProduced"</span><span class='hs-layout'>)</span>
<a name="line-20254"></a>        <span class='hs-varid'>reader_handle</span>
<a name="line-20255"></a><span class='hs-comment'>{-
<a name="line-20256"></a>input_arg {
<a name="line-20257"></a>  description: "Handle to a Reader."
<a name="line-20258"></a>  is_ref: true
<a name="line-20259"></a>  name: "reader_handle"
<a name="line-20260"></a>  type: DT_STRING
<a name="line-20261"></a>}
<a name="line-20262"></a>output_arg { name: "records_produced" type: DT_INT64 }
<a name="line-20263"></a>-}</span>
<a name="line-20264"></a>
<a name="line-20265"></a><span class='hs-comment'>-- | Pop the element at the top of the stack.</span>
<a name="line-20266"></a>
<a name="line-20267"></a><a name="stackPop"></a><span class='hs-definition'>stackPop</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>elem_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>elem_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20268"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a stack.</span>
<a name="line-20269"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>elem_type</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __elem__: The tensor that is popped from the top of the stack.</span>
<a name="line-20270"></a><span class='hs-definition'>stackPop</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20271"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StackPop"</span>
<a name="line-20272"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"elem_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>elem_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20273"></a>        <span class='hs-varid'>handle</span>
<a name="line-20274"></a><span class='hs-comment'>{-
<a name="line-20275"></a>attr {
<a name="line-20276"></a>  description: "The type of the elem that is popped."
<a name="line-20277"></a>  name: "elem_type"
<a name="line-20278"></a>  type: "type"
<a name="line-20279"></a>}
<a name="line-20280"></a>input_arg {
<a name="line-20281"></a>  description: "The handle to a stack."
<a name="line-20282"></a>  is_ref: true
<a name="line-20283"></a>  name: "handle"
<a name="line-20284"></a>  type: DT_STRING
<a name="line-20285"></a>}
<a name="line-20286"></a>output_arg {
<a name="line-20287"></a>  description: "The tensor that is popped from the top of the stack."
<a name="line-20288"></a>  name: "elem"
<a name="line-20289"></a>  type_attr: "elem_type"
<a name="line-20290"></a>}
<a name="line-20291"></a>-}</span>
<a name="line-20292"></a>
<a name="line-20293"></a><a name="tensorArrayScatterV2"></a><span class='hs-comment'>-- | Scatter the data from the input value into specific TensorArray elements.</span>
<a name="line-20294"></a><span class='hs-comment'>--</span>
<a name="line-20295"></a><span class='hs-comment'>-- `indices` must be a vector, its length must match the first dim of `value`.</span>
<a name="line-20296"></a><span class='hs-definition'>tensorArrayScatterV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20297"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a TensorArray.</span>
<a name="line-20298"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __indices__: The locations at which to write the tensor elements.</span>
<a name="line-20299"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: The concatenated tensor to write to the TensorArray.</span>
<a name="line-20300"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-20301"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_out__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-20302"></a><span class='hs-definition'>tensorArrayScatterV2</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>value</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20303"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayScatterV2"</span>
<a name="line-20304"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20305"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>value</span> <span class='hs-varid'>flow_in</span>
<a name="line-20306"></a><span class='hs-comment'>{-
<a name="line-20307"></a>attr { name: "T" type: "type" }
<a name="line-20308"></a>input_arg {
<a name="line-20309"></a>  description: "The handle to a TensorArray."
<a name="line-20310"></a>  name: "handle"
<a name="line-20311"></a>  type: DT_STRING
<a name="line-20312"></a>}
<a name="line-20313"></a>input_arg {
<a name="line-20314"></a>  description: "The locations at which to write the tensor elements."
<a name="line-20315"></a>  name: "indices"
<a name="line-20316"></a>  type: DT_INT32
<a name="line-20317"></a>}
<a name="line-20318"></a>input_arg {
<a name="line-20319"></a>  description: "The concatenated tensor to write to the TensorArray."
<a name="line-20320"></a>  name: "value"
<a name="line-20321"></a>  type_attr: "T"
<a name="line-20322"></a>}
<a name="line-20323"></a>input_arg {
<a name="line-20324"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-20325"></a>  name: "flow_in"
<a name="line-20326"></a>  type: DT_FLOAT
<a name="line-20327"></a>}
<a name="line-20328"></a>output_arg {
<a name="line-20329"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-20330"></a>  name: "flow_out"
<a name="line-20331"></a>  type: DT_FLOAT
<a name="line-20332"></a>}
<a name="line-20333"></a>-}</span>
<a name="line-20334"></a>
<a name="line-20335"></a><a name="rGBToHSV"></a><span class='hs-comment'>-- | Converts one or more images from RGB to HSV.</span>
<a name="line-20336"></a><span class='hs-comment'>--</span>
<a name="line-20337"></a><span class='hs-comment'>-- Outputs a tensor of the same shape as the `images` tensor, containing the HSV</span>
<a name="line-20338"></a><span class='hs-comment'>-- value of the pixels. The output is only well defined if the value in `images`</span>
<a name="line-20339"></a><span class='hs-comment'>-- are in `[0,1]`.</span>
<a name="line-20340"></a><span class='hs-comment'>-- </span>
<a name="line-20341"></a><span class='hs-comment'>-- `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and</span>
<a name="line-20342"></a><span class='hs-comment'>-- `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0</span>
<a name="line-20343"></a><span class='hs-comment'>-- corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.</span>
<a name="line-20344"></a><span class='hs-definition'>rGBToHSV</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20345"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __images__: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.</span>
<a name="line-20346"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: `images` converted to HSV.</span>
<a name="line-20347"></a><span class='hs-definition'>rGBToHSV</span> <span class='hs-varid'>images</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20348"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RGBToHSV"</span>
<a name="line-20349"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20350"></a>        <span class='hs-varid'>images</span>
<a name="line-20351"></a><span class='hs-comment'>{-
<a name="line-20352"></a>attr {
<a name="line-20353"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-20354"></a>  default_value { type: DT_FLOAT }
<a name="line-20355"></a>  name: "T"
<a name="line-20356"></a>  type: "type"
<a name="line-20357"></a>}
<a name="line-20358"></a>input_arg {
<a name="line-20359"></a>  description: "1-D or higher rank. RGB data to convert. Last dimension must be size 3."
<a name="line-20360"></a>  name: "images"
<a name="line-20361"></a>  type_attr: "T"
<a name="line-20362"></a>}
<a name="line-20363"></a>output_arg {
<a name="line-20364"></a>  description: "`images` converted to HSV."
<a name="line-20365"></a>  name: "output"
<a name="line-20366"></a>  type_attr: "T"
<a name="line-20367"></a>}
<a name="line-20368"></a>-}</span>
<a name="line-20369"></a>
<a name="line-20370"></a><a name="serializeManySparse"></a><span class='hs-comment'>-- | Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.</span>
<a name="line-20371"></a><span class='hs-comment'>--</span>
<a name="line-20372"></a><span class='hs-comment'>-- The `SparseTensor` must have rank `R` greater than 1, and the first dimension</span>
<a name="line-20373"></a><span class='hs-comment'>-- is treated as the minibatch dimension.  Elements of the `SparseTensor`</span>
<a name="line-20374"></a><span class='hs-comment'>-- must be sorted in increasing order of this first dimension.  The serialized</span>
<a name="line-20375"></a><span class='hs-comment'>-- `SparseTensor` objects going into each row of `serialized_sparse` will have</span>
<a name="line-20376"></a><span class='hs-comment'>-- rank `R-1`.</span>
<a name="line-20377"></a><span class='hs-comment'>-- </span>
<a name="line-20378"></a><span class='hs-comment'>-- The minibatch size `N` is extracted from `sparse_shape[0]`.</span>
<a name="line-20379"></a><span class='hs-definition'>serializeManySparse</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20380"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.</span>
<a name="line-20381"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.</span>
<a name="line-20382"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.</span>
<a name="line-20383"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __serialized_sparse__</span>
<a name="line-20384"></a><span class='hs-definition'>serializeManySparse</span> <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>sparse_values</span>
<a name="line-20385"></a>                    <span class='hs-varid'>sparse_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20386"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SerializeManySparse"</span>
<a name="line-20387"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20388"></a>        <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>sparse_values</span> <span class='hs-varid'>sparse_shape</span>
<a name="line-20389"></a><span class='hs-comment'>{-
<a name="line-20390"></a>attr { name: "T" type: "type" }
<a name="line-20391"></a>input_arg {
<a name="line-20392"></a>  description: "2-D.  The `indices` of the minibatch `SparseTensor`."
<a name="line-20393"></a>  name: "sparse_indices"
<a name="line-20394"></a>  type: DT_INT64
<a name="line-20395"></a>}
<a name="line-20396"></a>input_arg {
<a name="line-20397"></a>  description: "1-D.  The `values` of the minibatch `SparseTensor`."
<a name="line-20398"></a>  name: "sparse_values"
<a name="line-20399"></a>  type_attr: "T"
<a name="line-20400"></a>}
<a name="line-20401"></a>input_arg {
<a name="line-20402"></a>  description: "1-D.  The `shape` of the minibatch `SparseTensor`."
<a name="line-20403"></a>  name: "sparse_shape"
<a name="line-20404"></a>  type: DT_INT64
<a name="line-20405"></a>}
<a name="line-20406"></a>output_arg { name: "serialized_sparse" type: DT_STRING }
<a name="line-20407"></a>-}</span>
<a name="line-20408"></a>
<a name="line-20409"></a><a name="initializeTableFromTextFile"></a><span class='hs-comment'>-- | Initializes a table from a text file.</span>
<a name="line-20410"></a><span class='hs-comment'>--</span>
<a name="line-20411"></a><span class='hs-comment'>-- It inserts one key-value pair into the table for each line of the file.</span>
<a name="line-20412"></a><span class='hs-comment'>-- The key and value is extracted from the whole line content, elements from the</span>
<a name="line-20413"></a><span class='hs-comment'>-- split line based on `delimiter` or the line number (starting from zero).</span>
<a name="line-20414"></a><span class='hs-comment'>-- Where to extract the key and value from a line is specified by `key_index` and</span>
<a name="line-20415"></a><span class='hs-comment'>-- `value_index`.</span>
<a name="line-20416"></a><span class='hs-comment'>-- </span>
<a name="line-20417"></a><span class='hs-comment'>-- - A value of -1 means use the line number(starting from zero), expects `int64`.</span>
<a name="line-20418"></a><span class='hs-comment'>-- - A value of -2 means use the whole line content, expects `string`.</span>
<a name="line-20419"></a><span class='hs-comment'>-- - A value &gt;= 0 means use the index (starting at zero) of the split line based</span>
<a name="line-20420"></a><span class='hs-comment'>--   on `delimiter`.</span>
<a name="line-20421"></a><span class='hs-definition'>initializeTableFromTextFile</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __key_index__: Column index in a line to get the table `key` values from.</span>
<a name="line-20422"></a>                               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __value_index__: Column index that represents information of a line to get the table</span>
<a name="line-20423"></a>                                                 <span class='hs-comment'>-- `value` values from.</span>
<a name="line-20424"></a>                               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __table_handle__: Handle to a table which will be initialized.</span>
<a name="line-20425"></a>                               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __filename__: Filename of a vocabulary text file.</span>
<a name="line-20426"></a>                               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-20427"></a><span class='hs-definition'>initializeTableFromTextFile</span> <span class='hs-varid'>key_index</span> <span class='hs-varid'>value_index</span> <span class='hs-varid'>table_handle</span>
<a name="line-20428"></a>                            <span class='hs-varid'>filename</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20429"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"InitializeTableFromTextFile"</span>
<a name="line-20430"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"key_index"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>key_index</span>
<a name="line-20431"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"value_index"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>value_index</span><span class='hs-layout'>)</span>
<a name="line-20432"></a>        <span class='hs-varid'>table_handle</span> <span class='hs-varid'>filename</span>
<a name="line-20433"></a><span class='hs-comment'>{-
<a name="line-20434"></a>attr {
<a name="line-20435"></a>  description: "Column index in a line to get the table `key` values from."
<a name="line-20436"></a>  has_minimum: true
<a name="line-20437"></a>  minimum: -2
<a name="line-20438"></a>  name: "key_index"
<a name="line-20439"></a>  type: "int"
<a name="line-20440"></a>}
<a name="line-20441"></a>attr {
<a name="line-20442"></a>  description: "Column index that represents information of a line to get the table\n`value` values from."
<a name="line-20443"></a>  has_minimum: true
<a name="line-20444"></a>  minimum: -2
<a name="line-20445"></a>  name: "value_index"
<a name="line-20446"></a>  type: "int"
<a name="line-20447"></a>}
<a name="line-20448"></a>attr {
<a name="line-20449"></a>  default_value { i: -1 }
<a name="line-20450"></a>  description: "Number of elements of the file, use -1 if unknown."
<a name="line-20451"></a>  has_minimum: true
<a name="line-20452"></a>  minimum: -1
<a name="line-20453"></a>  name: "vocab_size"
<a name="line-20454"></a>  type: "int"
<a name="line-20455"></a>}
<a name="line-20456"></a>attr {
<a name="line-20457"></a>  default_value { s: "\t" }
<a name="line-20458"></a>  description: "Delimiter to separate fields in a line."
<a name="line-20459"></a>  name: "delimiter"
<a name="line-20460"></a>  type: "string"
<a name="line-20461"></a>}
<a name="line-20462"></a>input_arg {
<a name="line-20463"></a>  description: "Handle to a table which will be initialized."
<a name="line-20464"></a>  is_ref: true
<a name="line-20465"></a>  name: "table_handle"
<a name="line-20466"></a>  type: DT_STRING
<a name="line-20467"></a>}
<a name="line-20468"></a>input_arg {
<a name="line-20469"></a>  description: "Filename of a vocabulary text file."
<a name="line-20470"></a>  name: "filename"
<a name="line-20471"></a>  type: DT_STRING
<a name="line-20472"></a>}
<a name="line-20473"></a>-}</span>
<a name="line-20474"></a>
<a name="line-20475"></a><a name="decodePng"></a><span class='hs-comment'>-- | Decode a PNG-encoded image to a uint8 or uint16 tensor.</span>
<a name="line-20476"></a><span class='hs-comment'>--</span>
<a name="line-20477"></a><span class='hs-comment'>-- The attr `channels` indicates the desired number of color channels for the</span>
<a name="line-20478"></a><span class='hs-comment'>-- decoded image.</span>
<a name="line-20479"></a><span class='hs-comment'>-- </span>
<a name="line-20480"></a><span class='hs-comment'>-- Accepted values are:</span>
<a name="line-20481"></a><span class='hs-comment'>-- </span>
<a name="line-20482"></a><span class='hs-comment'>-- *   0: Use the number of channels in the PNG-encoded image.</span>
<a name="line-20483"></a><span class='hs-comment'>-- *   1: output a grayscale image.</span>
<a name="line-20484"></a><span class='hs-comment'>-- *   3: output an RGB image.</span>
<a name="line-20485"></a><span class='hs-comment'>-- *   4: output an RGBA image.</span>
<a name="line-20486"></a><span class='hs-comment'>-- </span>
<a name="line-20487"></a><span class='hs-comment'>-- If needed, the PNG-encoded image is transformed to match the requested number</span>
<a name="line-20488"></a><span class='hs-comment'>-- of color channels.</span>
<a name="line-20489"></a><span class='hs-definition'>decodePng</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-20490"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20491"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __contents__: 0-D.  The PNG-encoded image.</span>
<a name="line-20492"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __image__: 3-D with shape `[height, width, channels]`.</span>
<a name="line-20493"></a><span class='hs-definition'>decodePng</span> <span class='hs-varid'>contents</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20494"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DecodePng"</span>
<a name="line-20495"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20496"></a>        <span class='hs-varid'>contents</span>
<a name="line-20497"></a><span class='hs-comment'>{-
<a name="line-20498"></a>attr {
<a name="line-20499"></a>  default_value { i: 0 }
<a name="line-20500"></a>  description: "Number of color channels for the decoded image."
<a name="line-20501"></a>  name: "channels"
<a name="line-20502"></a>  type: "int"
<a name="line-20503"></a>}
<a name="line-20504"></a>attr {
<a name="line-20505"></a>  allowed_values { list { type: DT_UINT8 type: DT_UINT16 } }
<a name="line-20506"></a>  default_value { type: DT_UINT8 }
<a name="line-20507"></a>  name: "dtype"
<a name="line-20508"></a>  type: "type"
<a name="line-20509"></a>}
<a name="line-20510"></a>input_arg {
<a name="line-20511"></a>  description: "0-D.  The PNG-encoded image."
<a name="line-20512"></a>  name: "contents"
<a name="line-20513"></a>  type: DT_STRING
<a name="line-20514"></a>}
<a name="line-20515"></a>output_arg {
<a name="line-20516"></a>  description: "3-D with shape `[height, width, channels]`."
<a name="line-20517"></a>  name: "image"
<a name="line-20518"></a>  type_attr: "dtype"
<a name="line-20519"></a>}
<a name="line-20520"></a>-}</span>
<a name="line-20521"></a>
<a name="line-20522"></a><span class='hs-comment'>-- | Get the current size of the TensorArray.</span>
<a name="line-20523"></a>
<a name="line-20524"></a><a name="tensorArraySizeV2"></a><span class='hs-definition'>tensorArraySizeV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).</span>
<a name="line-20525"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-20526"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __size__: The current size of the TensorArray.</span>
<a name="line-20527"></a><span class='hs-definition'>tensorArraySizeV2</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20528"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArraySizeV2"</span><span class='hs-layout'>)</span>
<a name="line-20529"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span>
<a name="line-20530"></a><span class='hs-comment'>{-
<a name="line-20531"></a>input_arg {
<a name="line-20532"></a>  description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)."
<a name="line-20533"></a>  name: "handle"
<a name="line-20534"></a>  type: DT_STRING
<a name="line-20535"></a>}
<a name="line-20536"></a>input_arg {
<a name="line-20537"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-20538"></a>  name: "flow_in"
<a name="line-20539"></a>  type: DT_FLOAT
<a name="line-20540"></a>}
<a name="line-20541"></a>output_arg {
<a name="line-20542"></a>  description: "The current size of the TensorArray."
<a name="line-20543"></a>  name: "size"
<a name="line-20544"></a>  type: DT_INT32
<a name="line-20545"></a>}
<a name="line-20546"></a>-}</span>
<a name="line-20547"></a>
<a name="line-20548"></a><a name="div"></a><span class='hs-comment'>-- | Returns x / y element-wise.</span>
<a name="line-20549"></a><span class='hs-comment'>--</span>
<a name="line-20550"></a><span class='hs-comment'>-- *NOTE*: `Div` supports broadcasting. More about broadcasting</span>
<a name="line-20551"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-20552"></a><span class='hs-definition'>div</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-20553"></a>                                               <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-20554"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-20555"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-20556"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-20557"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-20558"></a>                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20559"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-20560"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-20561"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-20562"></a><span class='hs-definition'>div</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20563"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Div"</span>
<a name="line-20564"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20565"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-20566"></a><span class='hs-comment'>{-
<a name="line-20567"></a>attr {
<a name="line-20568"></a>  allowed_values {
<a name="line-20569"></a>    list {
<a name="line-20570"></a>      type: DT_HALF
<a name="line-20571"></a>      type: DT_FLOAT
<a name="line-20572"></a>      type: DT_DOUBLE
<a name="line-20573"></a>      type: DT_UINT8
<a name="line-20574"></a>      type: DT_INT8
<a name="line-20575"></a>      type: DT_UINT16
<a name="line-20576"></a>      type: DT_INT16
<a name="line-20577"></a>      type: DT_INT32
<a name="line-20578"></a>      type: DT_INT64
<a name="line-20579"></a>      type: DT_COMPLEX64
<a name="line-20580"></a>      type: DT_COMPLEX128
<a name="line-20581"></a>    }
<a name="line-20582"></a>  }
<a name="line-20583"></a>  name: "T"
<a name="line-20584"></a>  type: "type"
<a name="line-20585"></a>}
<a name="line-20586"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-20587"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-20588"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-20589"></a>-}</span>
<a name="line-20590"></a>
<a name="line-20591"></a><a name="logUniformCandidateSampler"></a><span class='hs-comment'>-- | Generates labels for candidate sampling with a log-uniform distribution.</span>
<a name="line-20592"></a><span class='hs-comment'>--</span>
<a name="line-20593"></a><span class='hs-comment'>-- See explanations of candidate sampling and the data formats at</span>
<a name="line-20594"></a><span class='hs-comment'>-- go/candidate-sampling.</span>
<a name="line-20595"></a><span class='hs-comment'>-- </span>
<a name="line-20596"></a><span class='hs-comment'>-- For each batch, this op picks a single set of sampled candidate labels.</span>
<a name="line-20597"></a><span class='hs-comment'>-- </span>
<a name="line-20598"></a><span class='hs-comment'>-- The advantages of sampling candidates per-batch are simplicity and the</span>
<a name="line-20599"></a><span class='hs-comment'>-- possibility of efficient dense matrix multiplication. The disadvantage is that</span>
<a name="line-20600"></a><span class='hs-comment'>-- the sampled candidates must be chosen independently of the context and of the</span>
<a name="line-20601"></a><span class='hs-comment'>-- true labels.</span>
<a name="line-20602"></a><span class='hs-definition'>logUniformCandidateSampler</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_sampled__: Number of candidates to randomly sample per batch.</span>
<a name="line-20603"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_true__: Number of true labels per context.</span>
<a name="line-20604"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).</span>
<a name="line-20605"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __unique__: If unique is true, we sample with rejection, so that all sampled</span>
<a name="line-20606"></a>                                      <span class='hs-comment'>-- candidates in a batch are unique. This requires some approximation to</span>
<a name="line-20607"></a>                                      <span class='hs-comment'>-- estimate the post-rejection sampling probabilities.</span>
<a name="line-20608"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the</span>
<a name="line-20609"></a>                                                          <span class='hs-comment'>-- IDs of the num_true target_classes in the corresponding original label.</span>
<a name="line-20610"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-20611"></a>                                  <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-20612"></a>                              <span class='hs-comment'>-- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)</span>
<a name="line-20613"></a>                              <span class='hs-comment'>--</span>
<a name="line-20614"></a>                              <span class='hs-comment'>-- * __sampled_candidates__: A vector of length num_sampled, in which each element is</span>
<a name="line-20615"></a>                              <span class='hs-comment'>-- the ID of a sampled candidate.</span>
<a name="line-20616"></a>                              <span class='hs-comment'>--</span>
<a name="line-20617"></a>                              <span class='hs-comment'>-- * __true_expected_count__: A batch_size * num_true matrix, representing</span>
<a name="line-20618"></a>                              <span class='hs-comment'>-- the number of times each candidate is expected to occur in a batch</span>
<a name="line-20619"></a>                              <span class='hs-comment'>-- of sampled candidates. If unique=true, then this is a probability.</span>
<a name="line-20620"></a>                              <span class='hs-comment'>--</span>
<a name="line-20621"></a>                              <span class='hs-comment'>-- * __sampled_expected_count__: A vector of length num_sampled, for each sampled</span>
<a name="line-20622"></a>                              <span class='hs-comment'>-- candidate representing the number of times the candidate is expected</span>
<a name="line-20623"></a>                              <span class='hs-comment'>-- to occur in a batch of sampled candidates.  If unique=true, then this is a</span>
<a name="line-20624"></a>                              <span class='hs-comment'>-- probability.</span>
<a name="line-20625"></a><span class='hs-definition'>logUniformCandidateSampler</span> <span class='hs-varid'>num_sampled</span> <span class='hs-varid'>num_true</span> <span class='hs-varid'>range_max</span> <span class='hs-varid'>unique</span>
<a name="line-20626"></a>                           <span class='hs-varid'>true_classes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20627"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LogUniformCandidateSampler"</span>
<a name="line-20628"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_sampled"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_sampled</span>
<a name="line-20629"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_true"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_true</span>
<a name="line-20630"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"range_max"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>range_max</span>
<a name="line-20631"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"unique"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>unique</span><span class='hs-layout'>)</span>
<a name="line-20632"></a>        <span class='hs-varid'>true_classes</span>
<a name="line-20633"></a><span class='hs-comment'>{-
<a name="line-20634"></a>attr {
<a name="line-20635"></a>  description: "Number of true labels per context."
<a name="line-20636"></a>  has_minimum: true
<a name="line-20637"></a>  minimum: 1
<a name="line-20638"></a>  name: "num_true"
<a name="line-20639"></a>  type: "int"
<a name="line-20640"></a>}
<a name="line-20641"></a>attr {
<a name="line-20642"></a>  description: "Number of candidates to randomly sample per batch."
<a name="line-20643"></a>  has_minimum: true
<a name="line-20644"></a>  minimum: 1
<a name="line-20645"></a>  name: "num_sampled"
<a name="line-20646"></a>  type: "int"
<a name="line-20647"></a>}
<a name="line-20648"></a>attr {
<a name="line-20649"></a>  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
<a name="line-20650"></a>  name: "unique"
<a name="line-20651"></a>  type: "bool"
<a name="line-20652"></a>}
<a name="line-20653"></a>attr {
<a name="line-20654"></a>  description: "The sampler will sample integers from the interval [0, range_max)."
<a name="line-20655"></a>  has_minimum: true
<a name="line-20656"></a>  minimum: 1
<a name="line-20657"></a>  name: "range_max"
<a name="line-20658"></a>  type: "int"
<a name="line-20659"></a>}
<a name="line-20660"></a>attr {
<a name="line-20661"></a>  default_value { i: 0 }
<a name="line-20662"></a>  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-20663"></a>  name: "seed"
<a name="line-20664"></a>  type: "int"
<a name="line-20665"></a>}
<a name="line-20666"></a>attr {
<a name="line-20667"></a>  default_value { i: 0 }
<a name="line-20668"></a>  description: "An second seed to avoid seed collision."
<a name="line-20669"></a>  name: "seed2"
<a name="line-20670"></a>  type: "int"
<a name="line-20671"></a>}
<a name="line-20672"></a>input_arg {
<a name="line-20673"></a>  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
<a name="line-20674"></a>  name: "true_classes"
<a name="line-20675"></a>  type: DT_INT64
<a name="line-20676"></a>}
<a name="line-20677"></a>output_arg {
<a name="line-20678"></a>  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
<a name="line-20679"></a>  name: "sampled_candidates"
<a name="line-20680"></a>  type: DT_INT64
<a name="line-20681"></a>}
<a name="line-20682"></a>output_arg {
<a name="line-20683"></a>  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
<a name="line-20684"></a>  name: "true_expected_count"
<a name="line-20685"></a>  type: DT_FLOAT
<a name="line-20686"></a>}
<a name="line-20687"></a>output_arg {
<a name="line-20688"></a>  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
<a name="line-20689"></a>  name: "sampled_expected_count"
<a name="line-20690"></a>  type: DT_FLOAT
<a name="line-20691"></a>}
<a name="line-20692"></a>-}</span>
<a name="line-20693"></a>
<a name="line-20694"></a><a name="barrier"></a><span class='hs-comment'>-- | Defines a barrier that persists across different graph executions.</span>
<a name="line-20695"></a><span class='hs-comment'>--</span>
<a name="line-20696"></a><span class='hs-comment'>-- A barrier represents a key-value map, where each key is a string, and</span>
<a name="line-20697"></a><span class='hs-comment'>-- each value is a tuple of tensors.</span>
<a name="line-20698"></a><span class='hs-comment'>-- </span>
<a name="line-20699"></a><span class='hs-comment'>-- At runtime, the barrier contains 'complete' and 'incomplete'</span>
<a name="line-20700"></a><span class='hs-comment'>-- elements. A complete element has defined tensors for all components of</span>
<a name="line-20701"></a><span class='hs-comment'>-- its value tuple, and may be accessed using BarrierTakeMany. An</span>
<a name="line-20702"></a><span class='hs-comment'>-- incomplete element has some undefined components in its value tuple,</span>
<a name="line-20703"></a><span class='hs-comment'>-- and may be updated using BarrierInsertMany.</span>
<a name="line-20704"></a><span class='hs-definition'>barrier</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __handle__: The handle to the barrier.</span>
<a name="line-20705"></a><span class='hs-definition'>barrier</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20706"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Barrier"</span><span class='hs-layout'>)</span>
<a name="line-20707"></a>        
<a name="line-20708"></a><span class='hs-comment'>{-
<a name="line-20709"></a>attr {
<a name="line-20710"></a>  description: "The type of each component in a value."
<a name="line-20711"></a>  has_minimum: true
<a name="line-20712"></a>  minimum: 1
<a name="line-20713"></a>  name: "component_types"
<a name="line-20714"></a>  type: "list(type)"
<a name="line-20715"></a>}
<a name="line-20716"></a>attr {
<a name="line-20717"></a>  default_value { list { } }
<a name="line-20718"></a>  description: "The shape of each component in a value. Each shape must be 1 in the\nfirst dimension. The length of this attr must be the same as the length of\ncomponent_types."
<a name="line-20719"></a>  has_minimum: true
<a name="line-20720"></a>  name: "shapes"
<a name="line-20721"></a>  type: "list(shape)"
<a name="line-20722"></a>}
<a name="line-20723"></a>attr {
<a name="line-20724"></a>  default_value { i: -1 }
<a name="line-20725"></a>  description: "The capacity of the barrier.  The default capacity is MAX_INT32,\nwhich is the largest capacity of the underlying queue."
<a name="line-20726"></a>  name: "capacity"
<a name="line-20727"></a>  type: "int"
<a name="line-20728"></a>}
<a name="line-20729"></a>attr {
<a name="line-20730"></a>  default_value { s: "" }
<a name="line-20731"></a>  description: "If non-empty, this barrier is placed in the given container.\nOtherwise, a default container is used."
<a name="line-20732"></a>  name: "container"
<a name="line-20733"></a>  type: "string"
<a name="line-20734"></a>}
<a name="line-20735"></a>attr {
<a name="line-20736"></a>  default_value { s: "" }
<a name="line-20737"></a>  description: "If non-empty, this barrier will be shared under the given name\nacross multiple sessions."
<a name="line-20738"></a>  name: "shared_name"
<a name="line-20739"></a>  type: "string"
<a name="line-20740"></a>}
<a name="line-20741"></a>output_arg {
<a name="line-20742"></a>  description: "The handle to the barrier."
<a name="line-20743"></a>  is_ref: true
<a name="line-20744"></a>  name: "handle"
<a name="line-20745"></a>  type: DT_STRING
<a name="line-20746"></a>}
<a name="line-20747"></a>-}</span>
<a name="line-20748"></a>
<a name="line-20749"></a><span class='hs-comment'>-- | Creates a variable resource.</span>
<a name="line-20750"></a>
<a name="line-20751"></a><a name="createVariableOp"></a><span class='hs-definition'>createVariableOp</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20752"></a>                    <span class='hs-conid'>ResourceHandle</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __resource__: handle to the resource in which to store the variable.</span>
<a name="line-20753"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __value__: the value to set the new tensor to use.</span>
<a name="line-20754"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-20755"></a><span class='hs-definition'>createVariableOp</span> <span class='hs-varid'>resource</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20756"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CreateVariableOp"</span>
<a name="line-20757"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20758"></a>        <span class='hs-varid'>resource</span> <span class='hs-varid'>value</span>
<a name="line-20759"></a><span class='hs-comment'>{-
<a name="line-20760"></a>attr {
<a name="line-20761"></a>  description: "the dtype of the value." name: "dtype" type: "type"
<a name="line-20762"></a>}
<a name="line-20763"></a>input_arg {
<a name="line-20764"></a>  description: "handle to the resource in which to store the variable."
<a name="line-20765"></a>  name: "resource"
<a name="line-20766"></a>  type: DT_RESOURCE
<a name="line-20767"></a>}
<a name="line-20768"></a>input_arg {
<a name="line-20769"></a>  description: "the value to set the new tensor to use."
<a name="line-20770"></a>  name: "value"
<a name="line-20771"></a>  type_attr: "dtype"
<a name="line-20772"></a>}
<a name="line-20773"></a>-}</span>
<a name="line-20774"></a>
<a name="line-20775"></a><a name="accumulatorApplyGradient"></a><span class='hs-comment'>-- | Applies a gradient to a given accumulator. Does not add if local_step is lesser</span>
<a name="line-20776"></a><span class='hs-comment'>--</span>
<a name="line-20777"></a><span class='hs-comment'>-- than the accumulator's global_step.</span>
<a name="line-20778"></a><span class='hs-definition'>accumulatorApplyGradient</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-20779"></a>                                                  <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-20780"></a>                                                          <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-20781"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-20782"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-20783"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-20784"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-20785"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-20786"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-20787"></a>                                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-20788"></a>                                                          <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20789"></a>                            <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a accumulator.</span>
<a name="line-20790"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __local_step__: The local_step value at which the gradient was computed.</span>
<a name="line-20791"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __gradient__: A tensor of the gradient to be accumulated.</span>
<a name="line-20792"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-20793"></a><span class='hs-definition'>accumulatorApplyGradient</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>local_step</span> <span class='hs-varid'>gradient</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20794"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AccumulatorApplyGradient"</span>
<a name="line-20795"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20796"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>local_step</span> <span class='hs-varid'>gradient</span>
<a name="line-20797"></a><span class='hs-comment'>{-
<a name="line-20798"></a>attr {
<a name="line-20799"></a>  allowed_values {
<a name="line-20800"></a>    list {
<a name="line-20801"></a>      type: DT_FLOAT
<a name="line-20802"></a>      type: DT_DOUBLE
<a name="line-20803"></a>      type: DT_INT64
<a name="line-20804"></a>      type: DT_INT32
<a name="line-20805"></a>      type: DT_UINT8
<a name="line-20806"></a>      type: DT_UINT16
<a name="line-20807"></a>      type: DT_INT16
<a name="line-20808"></a>      type: DT_INT8
<a name="line-20809"></a>      type: DT_COMPLEX64
<a name="line-20810"></a>      type: DT_COMPLEX128
<a name="line-20811"></a>      type: DT_QINT8
<a name="line-20812"></a>      type: DT_QUINT8
<a name="line-20813"></a>      type: DT_QINT32
<a name="line-20814"></a>      type: DT_HALF
<a name="line-20815"></a>    }
<a name="line-20816"></a>  }
<a name="line-20817"></a>  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
<a name="line-20818"></a>  name: "dtype"
<a name="line-20819"></a>  type: "type"
<a name="line-20820"></a>}
<a name="line-20821"></a>input_arg {
<a name="line-20822"></a>  description: "The handle to a accumulator."
<a name="line-20823"></a>  is_ref: true
<a name="line-20824"></a>  name: "handle"
<a name="line-20825"></a>  type: DT_STRING
<a name="line-20826"></a>}
<a name="line-20827"></a>input_arg {
<a name="line-20828"></a>  description: "The local_step value at which the gradient was computed."
<a name="line-20829"></a>  name: "local_step"
<a name="line-20830"></a>  type: DT_INT64
<a name="line-20831"></a>}
<a name="line-20832"></a>input_arg {
<a name="line-20833"></a>  description: "A tensor of the gradient to be accumulated."
<a name="line-20834"></a>  name: "gradient"
<a name="line-20835"></a>  type_attr: "dtype"
<a name="line-20836"></a>}
<a name="line-20837"></a>-}</span>
<a name="line-20838"></a>
<a name="line-20839"></a><a name="randomStandardNormal"></a><span class='hs-comment'>-- | Outputs random values from a normal distribution.</span>
<a name="line-20840"></a><span class='hs-comment'>--</span>
<a name="line-20841"></a><span class='hs-comment'>-- The generated values will have mean 0 and standard deviation 1.</span>
<a name="line-20842"></a><span class='hs-definition'>randomStandardNormal</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>dtype</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-20843"></a>                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-20844"></a>                                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-20845"></a>                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-20846"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20847"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __shape__: The shape of the output tensor.</span>
<a name="line-20848"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A tensor of the specified shape filled with random normal values.</span>
<a name="line-20849"></a><span class='hs-definition'>randomStandardNormal</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20850"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RandomStandardNormal"</span>
<a name="line-20851"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-20852"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20853"></a>        <span class='hs-varid'>shape</span>
<a name="line-20854"></a><span class='hs-comment'>{-
<a name="line-20855"></a>attr {
<a name="line-20856"></a>  default_value { i: 0 }
<a name="line-20857"></a>  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-20858"></a>  name: "seed"
<a name="line-20859"></a>  type: "int"
<a name="line-20860"></a>}
<a name="line-20861"></a>attr {
<a name="line-20862"></a>  default_value { i: 0 }
<a name="line-20863"></a>  description: "A second seed to avoid seed collision."
<a name="line-20864"></a>  name: "seed2"
<a name="line-20865"></a>  type: "int"
<a name="line-20866"></a>}
<a name="line-20867"></a>attr {
<a name="line-20868"></a>  allowed_values {
<a name="line-20869"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-20870"></a>  }
<a name="line-20871"></a>  description: "The type of the output."
<a name="line-20872"></a>  name: "dtype"
<a name="line-20873"></a>  type: "type"
<a name="line-20874"></a>}
<a name="line-20875"></a>attr {
<a name="line-20876"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-20877"></a>  name: "T"
<a name="line-20878"></a>  type: "type"
<a name="line-20879"></a>}
<a name="line-20880"></a>input_arg {
<a name="line-20881"></a>  description: "The shape of the output tensor."
<a name="line-20882"></a>  name: "shape"
<a name="line-20883"></a>  type_attr: "T"
<a name="line-20884"></a>}
<a name="line-20885"></a>output_arg {
<a name="line-20886"></a>  description: "A tensor of the specified shape filled with random normal values."
<a name="line-20887"></a>  name: "output"
<a name="line-20888"></a>  type_attr: "dtype"
<a name="line-20889"></a>}
<a name="line-20890"></a>-}</span>
<a name="line-20891"></a>
<a name="line-20892"></a><a name="parameterizedTruncatedNormal"></a><span class='hs-comment'>-- | Outputs random values from a normal distribution. The parameters may each be a</span>
<a name="line-20893"></a><span class='hs-comment'>--</span>
<a name="line-20894"></a><span class='hs-comment'>-- scalar which applies to the entire output, or a vector of length shape[0] which</span>
<a name="line-20895"></a><span class='hs-comment'>-- stores the parameters for each batch.</span>
<a name="line-20896"></a><span class='hs-definition'>parameterizedTruncatedNormal</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>dtype</span>
<a name="line-20897"></a>                                <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-20898"></a>                                                               <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-20899"></a>                                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-20900"></a>                                     <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-20901"></a>                                                           <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-20902"></a>                                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __shape__: The shape of the output tensor. Batches are indexed by the 0th dimension.</span>
<a name="line-20903"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __means__: The mean parameter of each batch.</span>
<a name="line-20904"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __stdevs__: The standard deviation parameter of each batch. Must be greater than 0.</span>
<a name="line-20905"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __minvals__: The minimum cutoff. May be -infinity.</span>
<a name="line-20906"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __maxvals__: The maximum cutoff. May be +infinity, and must be more than the minval</span>
<a name="line-20907"></a>                                                   <span class='hs-comment'>-- for each batch.</span>
<a name="line-20908"></a>                                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A matrix of shape num_batches x samples_per_batch, filled with random</span>
<a name="line-20909"></a>                                <span class='hs-comment'>-- truncated normal values using the parameters for each row.</span>
<a name="line-20910"></a><span class='hs-definition'>parameterizedTruncatedNormal</span> <span class='hs-varid'>shape</span> <span class='hs-varid'>means</span> <span class='hs-varid'>stdevs</span> <span class='hs-varid'>minvals</span>
<a name="line-20911"></a>                             <span class='hs-varid'>maxvals</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20912"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ParameterizedTruncatedNormal"</span>
<a name="line-20913"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-20914"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-20915"></a>        <span class='hs-varid'>shape</span> <span class='hs-varid'>means</span> <span class='hs-varid'>stdevs</span> <span class='hs-varid'>minvals</span> <span class='hs-varid'>maxvals</span>
<a name="line-20916"></a><span class='hs-comment'>{-
<a name="line-20917"></a>attr {
<a name="line-20918"></a>  default_value { i: 0 }
<a name="line-20919"></a>  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-20920"></a>  name: "seed"
<a name="line-20921"></a>  type: "int"
<a name="line-20922"></a>}
<a name="line-20923"></a>attr {
<a name="line-20924"></a>  default_value { i: 0 }
<a name="line-20925"></a>  description: "A second seed to avoid seed collision."
<a name="line-20926"></a>  name: "seed2"
<a name="line-20927"></a>  type: "int"
<a name="line-20928"></a>}
<a name="line-20929"></a>attr {
<a name="line-20930"></a>  allowed_values {
<a name="line-20931"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-20932"></a>  }
<a name="line-20933"></a>  description: "The type of the output."
<a name="line-20934"></a>  name: "dtype"
<a name="line-20935"></a>  type: "type"
<a name="line-20936"></a>}
<a name="line-20937"></a>attr {
<a name="line-20938"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-20939"></a>  name: "T"
<a name="line-20940"></a>  type: "type"
<a name="line-20941"></a>}
<a name="line-20942"></a>input_arg {
<a name="line-20943"></a>  description: "The shape of the output tensor. Batches are indexed by the 0th dimension."
<a name="line-20944"></a>  name: "shape"
<a name="line-20945"></a>  type_attr: "T"
<a name="line-20946"></a>}
<a name="line-20947"></a>input_arg {
<a name="line-20948"></a>  description: "The mean parameter of each batch."
<a name="line-20949"></a>  name: "means"
<a name="line-20950"></a>  type_attr: "dtype"
<a name="line-20951"></a>}
<a name="line-20952"></a>input_arg {
<a name="line-20953"></a>  description: "The standard deviation parameter of each batch. Must be greater than 0."
<a name="line-20954"></a>  name: "stdevs"
<a name="line-20955"></a>  type_attr: "dtype"
<a name="line-20956"></a>}
<a name="line-20957"></a>input_arg {
<a name="line-20958"></a>  description: "The minimum cutoff. May be -infinity."
<a name="line-20959"></a>  name: "minvals"
<a name="line-20960"></a>  type_attr: "dtype"
<a name="line-20961"></a>}
<a name="line-20962"></a>input_arg {
<a name="line-20963"></a>  description: "The maximum cutoff. May be +infinity, and must be more than the minval\nfor each batch."
<a name="line-20964"></a>  name: "maxvals"
<a name="line-20965"></a>  type_attr: "dtype"
<a name="line-20966"></a>}
<a name="line-20967"></a>output_arg {
<a name="line-20968"></a>  description: "A matrix of shape num_batches x samples_per_batch, filled with random\ntruncated normal values using the parameters for each row."
<a name="line-20969"></a>  name: "output"
<a name="line-20970"></a>  type_attr: "dtype"
<a name="line-20971"></a>}
<a name="line-20972"></a>-}</span>
<a name="line-20973"></a>
<a name="line-20974"></a><a name="accumulatorSetGlobalStep"></a><span class='hs-comment'>-- | Updates the accumulator with a new value for global_step. Logs warning if the</span>
<a name="line-20975"></a><span class='hs-comment'>--</span>
<a name="line-20976"></a><span class='hs-comment'>-- accumulator's value is already higher than new_global_step.</span>
<a name="line-20977"></a><span class='hs-definition'>accumulatorSetGlobalStep</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to an accumulator.</span>
<a name="line-20978"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __new_global_step__: The new global_step value to set.</span>
<a name="line-20979"></a>                            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-20980"></a><span class='hs-definition'>accumulatorSetGlobalStep</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>new_global_step</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-20981"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AccumulatorSetGlobalStep"</span><span class='hs-layout'>)</span>
<a name="line-20982"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>new_global_step</span>
<a name="line-20983"></a><span class='hs-comment'>{-
<a name="line-20984"></a>input_arg {
<a name="line-20985"></a>  description: "The handle to an accumulator."
<a name="line-20986"></a>  is_ref: true
<a name="line-20987"></a>  name: "handle"
<a name="line-20988"></a>  type: DT_STRING
<a name="line-20989"></a>}
<a name="line-20990"></a>input_arg {
<a name="line-20991"></a>  description: "The new global_step value to set."
<a name="line-20992"></a>  name: "new_global_step"
<a name="line-20993"></a>  type: DT_INT64
<a name="line-20994"></a>}
<a name="line-20995"></a>-}</span>
<a name="line-20996"></a>
<a name="line-20997"></a><a name="resizeBilinear"></a><span class='hs-comment'>-- | Resize `images` to `size` using bilinear interpolation.</span>
<a name="line-20998"></a><span class='hs-comment'>--</span>
<a name="line-20999"></a><span class='hs-comment'>-- Input images can be of different types but output images are always float.</span>
<a name="line-21000"></a><span class='hs-definition'>resizeBilinear</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-21001"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-21002"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-21003"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-21004"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-21005"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-21006"></a>                                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21007"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __images__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-21008"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The</span>
<a name="line-21009"></a>                                              <span class='hs-comment'>-- new size for the images.</span>
<a name="line-21010"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __resized_images__: 4-D with shape</span>
<a name="line-21011"></a>                  <span class='hs-comment'>-- `[batch, new_height, new_width, channels]`.</span>
<a name="line-21012"></a><span class='hs-definition'>resizeBilinear</span> <span class='hs-varid'>images</span> <span class='hs-varid'>size</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21013"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ResizeBilinear"</span>
<a name="line-21014"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21015"></a>        <span class='hs-varid'>images</span> <span class='hs-varid'>size</span>
<a name="line-21016"></a><span class='hs-comment'>{-
<a name="line-21017"></a>attr {
<a name="line-21018"></a>  allowed_values {
<a name="line-21019"></a>    list {
<a name="line-21020"></a>      type: DT_UINT8
<a name="line-21021"></a>      type: DT_INT8
<a name="line-21022"></a>      type: DT_INT16
<a name="line-21023"></a>      type: DT_INT32
<a name="line-21024"></a>      type: DT_INT64
<a name="line-21025"></a>      type: DT_HALF
<a name="line-21026"></a>      type: DT_FLOAT
<a name="line-21027"></a>      type: DT_DOUBLE
<a name="line-21028"></a>    }
<a name="line-21029"></a>  }
<a name="line-21030"></a>  name: "T"
<a name="line-21031"></a>  type: "type"
<a name="line-21032"></a>}
<a name="line-21033"></a>attr {
<a name="line-21034"></a>  default_value { b: false }
<a name="line-21035"></a>  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
<a name="line-21036"></a>  name: "align_corners"
<a name="line-21037"></a>  type: "bool"
<a name="line-21038"></a>}
<a name="line-21039"></a>input_arg {
<a name="line-21040"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-21041"></a>  name: "images"
<a name="line-21042"></a>  type_attr: "T"
<a name="line-21043"></a>}
<a name="line-21044"></a>input_arg {
<a name="line-21045"></a>  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
<a name="line-21046"></a>  name: "size"
<a name="line-21047"></a>  type: DT_INT32
<a name="line-21048"></a>}
<a name="line-21049"></a>output_arg {
<a name="line-21050"></a>  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
<a name="line-21051"></a>  name: "resized_images"
<a name="line-21052"></a>  type: DT_FLOAT
<a name="line-21053"></a>}
<a name="line-21054"></a>-}</span>
<a name="line-21055"></a>
<a name="line-21056"></a><a name="quantizeV2"></a><span class='hs-comment'>-- | Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.</span>
<a name="line-21057"></a><span class='hs-comment'>--</span>
<a name="line-21058"></a><span class='hs-comment'>-- [min_range, max_range] are scalar floats that specify the range for</span>
<a name="line-21059"></a><span class='hs-comment'>-- the 'input' data. The 'mode' attribute controls exactly which calculations are</span>
<a name="line-21060"></a><span class='hs-comment'>-- used to convert the float values to their quantized equivalents.</span>
<a name="line-21061"></a><span class='hs-comment'>-- </span>
<a name="line-21062"></a><span class='hs-comment'>-- In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:</span>
<a name="line-21063"></a><span class='hs-comment'>-- </span>
<a name="line-21064"></a><span class='hs-comment'>-- ```</span>
<a name="line-21065"></a><span class='hs-comment'>-- out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)</span>
<a name="line-21066"></a><span class='hs-comment'>-- if T == qint8, out[i] -= (range(T) + 1) / 2.0</span>
<a name="line-21067"></a><span class='hs-comment'>-- ```</span>
<a name="line-21068"></a><span class='hs-comment'>-- here `range(T) = numeric_limits&lt;T&gt;::max() - numeric_limits&lt;T&gt;::min()`</span>
<a name="line-21069"></a><span class='hs-comment'>-- </span>
<a name="line-21070"></a><span class='hs-comment'>-- *MIN_COMBINED Mode Example*</span>
<a name="line-21071"></a><span class='hs-comment'>-- </span>
<a name="line-21072"></a><span class='hs-comment'>-- Assume the input is type float and has a possible range of [0.0, 6.0] and the</span>
<a name="line-21073"></a><span class='hs-comment'>-- output type is quint8 ([0, 255]). The min_range and max_range values should be</span>
<a name="line-21074"></a><span class='hs-comment'>-- specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each</span>
<a name="line-21075"></a><span class='hs-comment'>-- value of the input by 255/6 and cast to quint8.</span>
<a name="line-21076"></a><span class='hs-comment'>-- </span>
<a name="line-21077"></a><span class='hs-comment'>-- If the output type was qint8 ([-128, 127]), the operation will additionally</span>
<a name="line-21078"></a><span class='hs-comment'>-- subtract each value by 128 prior to casting, so that the range of values aligns</span>
<a name="line-21079"></a><span class='hs-comment'>-- with the range of qint8.</span>
<a name="line-21080"></a><span class='hs-comment'>-- </span>
<a name="line-21081"></a><span class='hs-comment'>-- If the mode is 'MIN_FIRST', then this approach is used:</span>
<a name="line-21082"></a><span class='hs-comment'>-- </span>
<a name="line-21083"></a><span class='hs-comment'>-- ```</span>
<a name="line-21084"></a><span class='hs-comment'>-- number_of_steps = 1 &lt;&lt; (# of bits in T)</span>
<a name="line-21085"></a><span class='hs-comment'>-- range_adjust = number_of_steps / (number_of_steps - 1)</span>
<a name="line-21086"></a><span class='hs-comment'>-- range = (range_max - range_min) * range_adjust</span>
<a name="line-21087"></a><span class='hs-comment'>-- range_scale = number_of_steps / range</span>
<a name="line-21088"></a><span class='hs-comment'>-- quantized = round(input * range_scale) - round(range_min * range_scale) +</span>
<a name="line-21089"></a><span class='hs-comment'>--   numeric_limits&lt;T&gt;::min()</span>
<a name="line-21090"></a><span class='hs-comment'>-- quantized = max(quantized, numeric_limits&lt;T&gt;::min())</span>
<a name="line-21091"></a><span class='hs-comment'>-- quantized = min(quantized, numeric_limits&lt;T&gt;::max())</span>
<a name="line-21092"></a><span class='hs-comment'>-- ```</span>
<a name="line-21093"></a><span class='hs-comment'>-- </span>
<a name="line-21094"></a><span class='hs-comment'>-- The biggest difference between this and MIN_COMBINED is that the minimum range</span>
<a name="line-21095"></a><span class='hs-comment'>-- is rounded first, before it's subtracted from the rounded value. With</span>
<a name="line-21096"></a><span class='hs-comment'>-- MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing</span>
<a name="line-21097"></a><span class='hs-comment'>-- and dequantizing will introduce a larger and larger error.</span>
<a name="line-21098"></a><span class='hs-comment'>-- </span>
<a name="line-21099"></a><span class='hs-comment'>-- One thing to watch out for is that the operator may choose to adjust the</span>
<a name="line-21100"></a><span class='hs-comment'>-- requested minimum and maximum values slightly during the quantization process,</span>
<a name="line-21101"></a><span class='hs-comment'>-- so you should always use the output ports as the range for further calculations.</span>
<a name="line-21102"></a><span class='hs-comment'>-- For example, if the requested minimum and maximum values are close to equal,</span>
<a name="line-21103"></a><span class='hs-comment'>-- they will be separated by a small epsilon value to prevent ill-formed quantized</span>
<a name="line-21104"></a><span class='hs-comment'>-- buffers from being created. Otherwise, you can end up with buffers where all the</span>
<a name="line-21105"></a><span class='hs-comment'>-- quantized values map to the same float value, which causes problems for</span>
<a name="line-21106"></a><span class='hs-comment'>-- operations that have to perform further calculations on them.</span>
<a name="line-21107"></a><span class='hs-definition'>quantizeV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-21108"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-21109"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-21110"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21111"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-21112"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_range__: The minimum scalar value possibly produced for the input.</span>
<a name="line-21113"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_range__: The maximum scalar value possibly produced for the input.</span>
<a name="line-21114"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-21115"></a>              <span class='hs-comment'>-- ^ (__output__, __output_min__, __output_max__)</span>
<a name="line-21116"></a>              <span class='hs-comment'>--</span>
<a name="line-21117"></a>              <span class='hs-comment'>-- * __output__: The quantized data produced from the float input.</span>
<a name="line-21118"></a>              <span class='hs-comment'>--</span>
<a name="line-21119"></a>              <span class='hs-comment'>-- * __output_min__: The actual minimum scalar value used for the output.</span>
<a name="line-21120"></a>              <span class='hs-comment'>--</span>
<a name="line-21121"></a>              <span class='hs-comment'>-- * __output_max__: The actual maximum scalar value used for the output.</span>
<a name="line-21122"></a><span class='hs-definition'>quantizeV2</span> <span class='hs-varid'>input</span> <span class='hs-varid'>min_range</span> <span class='hs-varid'>max_range</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21123"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizeV2"</span>
<a name="line-21124"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21125"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>min_range</span> <span class='hs-varid'>max_range</span>
<a name="line-21126"></a><span class='hs-comment'>{-
<a name="line-21127"></a>attr {
<a name="line-21128"></a>  allowed_values {
<a name="line-21129"></a>    list {
<a name="line-21130"></a>      type: DT_QINT8
<a name="line-21131"></a>      type: DT_QUINT8
<a name="line-21132"></a>      type: DT_QINT16
<a name="line-21133"></a>      type: DT_QUINT16
<a name="line-21134"></a>      type: DT_QINT32
<a name="line-21135"></a>    }
<a name="line-21136"></a>  }
<a name="line-21137"></a>  name: "T"
<a name="line-21138"></a>  type: "type"
<a name="line-21139"></a>}
<a name="line-21140"></a>attr {
<a name="line-21141"></a>  allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" } }
<a name="line-21142"></a>  default_value { s: "MIN_COMBINED" }
<a name="line-21143"></a>  name: "mode"
<a name="line-21144"></a>  type: "string"
<a name="line-21145"></a>}
<a name="line-21146"></a>input_arg { name: "input" type: DT_FLOAT }
<a name="line-21147"></a>input_arg {
<a name="line-21148"></a>  description: "The minimum scalar value possibly produced for the input."
<a name="line-21149"></a>  name: "min_range"
<a name="line-21150"></a>  type: DT_FLOAT
<a name="line-21151"></a>}
<a name="line-21152"></a>input_arg {
<a name="line-21153"></a>  description: "The maximum scalar value possibly produced for the input."
<a name="line-21154"></a>  name: "max_range"
<a name="line-21155"></a>  type: DT_FLOAT
<a name="line-21156"></a>}
<a name="line-21157"></a>output_arg {
<a name="line-21158"></a>  description: "The quantized data produced from the float input."
<a name="line-21159"></a>  name: "output"
<a name="line-21160"></a>  type_attr: "T"
<a name="line-21161"></a>}
<a name="line-21162"></a>output_arg {
<a name="line-21163"></a>  description: "The actual minimum scalar value used for the output."
<a name="line-21164"></a>  name: "output_min"
<a name="line-21165"></a>  type: DT_FLOAT
<a name="line-21166"></a>}
<a name="line-21167"></a>output_arg {
<a name="line-21168"></a>  description: "The actual maximum scalar value used for the output."
<a name="line-21169"></a>  name: "output_max"
<a name="line-21170"></a>  type: DT_FLOAT
<a name="line-21171"></a>}
<a name="line-21172"></a>-}</span>
<a name="line-21173"></a>
<a name="line-21174"></a><a name="decodeJpeg"></a><span class='hs-comment'>-- | Decode a JPEG-encoded image to a uint8 tensor.</span>
<a name="line-21175"></a><span class='hs-comment'>--</span>
<a name="line-21176"></a><span class='hs-comment'>-- The attr `channels` indicates the desired number of color channels for the</span>
<a name="line-21177"></a><span class='hs-comment'>-- decoded image.</span>
<a name="line-21178"></a><span class='hs-comment'>-- </span>
<a name="line-21179"></a><span class='hs-comment'>-- Accepted values are:</span>
<a name="line-21180"></a><span class='hs-comment'>-- </span>
<a name="line-21181"></a><span class='hs-comment'>-- *   0: Use the number of channels in the JPEG-encoded image.</span>
<a name="line-21182"></a><span class='hs-comment'>-- *   1: output a grayscale image.</span>
<a name="line-21183"></a><span class='hs-comment'>-- *   3: output an RGB image.</span>
<a name="line-21184"></a><span class='hs-comment'>-- </span>
<a name="line-21185"></a><span class='hs-comment'>-- If needed, the JPEG-encoded image is transformed to match the requested number</span>
<a name="line-21186"></a><span class='hs-comment'>-- of color channels.</span>
<a name="line-21187"></a><span class='hs-comment'>-- </span>
<a name="line-21188"></a><span class='hs-comment'>-- The attr `ratio` allows downscaling the image by an integer factor during</span>
<a name="line-21189"></a><span class='hs-comment'>-- decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than</span>
<a name="line-21190"></a><span class='hs-comment'>-- downscaling the image later.</span>
<a name="line-21191"></a><span class='hs-definition'>decodeJpeg</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __contents__: 0-D.  The JPEG-encoded image.</span>
<a name="line-21192"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span> <span class='hs-comment'>-- ^ __image__: 3-D with shape `[height, width, channels]`..</span>
<a name="line-21193"></a><span class='hs-definition'>decodeJpeg</span> <span class='hs-varid'>contents</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21194"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DecodeJpeg"</span><span class='hs-layout'>)</span>
<a name="line-21195"></a>        <span class='hs-varid'>contents</span>
<a name="line-21196"></a><span class='hs-comment'>{-
<a name="line-21197"></a>attr {
<a name="line-21198"></a>  default_value { i: 0 }
<a name="line-21199"></a>  description: "Number of color channels for the decoded image."
<a name="line-21200"></a>  name: "channels"
<a name="line-21201"></a>  type: "int"
<a name="line-21202"></a>}
<a name="line-21203"></a>attr {
<a name="line-21204"></a>  default_value { i: 1 }
<a name="line-21205"></a>  description: "Downscaling ratio."
<a name="line-21206"></a>  name: "ratio"
<a name="line-21207"></a>  type: "int"
<a name="line-21208"></a>}
<a name="line-21209"></a>attr {
<a name="line-21210"></a>  default_value { b: true }
<a name="line-21211"></a>  description: "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only)."
<a name="line-21212"></a>  name: "fancy_upscaling"
<a name="line-21213"></a>  type: "bool"
<a name="line-21214"></a>}
<a name="line-21215"></a>attr {
<a name="line-21216"></a>  default_value { b: false }
<a name="line-21217"></a>  description: "If true try to recover an image from truncated input."
<a name="line-21218"></a>  name: "try_recover_truncated"
<a name="line-21219"></a>  type: "bool"
<a name="line-21220"></a>}
<a name="line-21221"></a>attr {
<a name="line-21222"></a>  default_value { f: 1.0 }
<a name="line-21223"></a>  description: "The minimum required fraction of lines before a truncated\ninput is accepted."
<a name="line-21224"></a>  name: "acceptable_fraction"
<a name="line-21225"></a>  type: "float"
<a name="line-21226"></a>}
<a name="line-21227"></a>input_arg {
<a name="line-21228"></a>  description: "0-D.  The JPEG-encoded image."
<a name="line-21229"></a>  name: "contents"
<a name="line-21230"></a>  type: DT_STRING
<a name="line-21231"></a>}
<a name="line-21232"></a>output_arg {
<a name="line-21233"></a>  description: "3-D with shape `[height, width, channels]`.."
<a name="line-21234"></a>  name: "image"
<a name="line-21235"></a>  type: DT_UINT8
<a name="line-21236"></a>}
<a name="line-21237"></a>-}</span>
<a name="line-21238"></a>
<a name="line-21239"></a><a name="pow"></a><span class='hs-comment'>-- | Computes the power of one value to another.</span>
<a name="line-21240"></a><span class='hs-comment'>--</span>
<a name="line-21241"></a><span class='hs-comment'>-- Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for</span>
<a name="line-21242"></a><span class='hs-comment'>-- corresponding elements in `x` and `y`. For example:</span>
<a name="line-21243"></a><span class='hs-comment'>-- </span>
<a name="line-21244"></a><span class='hs-comment'>-- ```</span>
<a name="line-21245"></a><span class='hs-comment'>-- # tensor 'x' is [[2, 2]], [3, 3]]</span>
<a name="line-21246"></a><span class='hs-comment'>-- # tensor 'y' is [[8, 16], [2, 3]]</span>
<a name="line-21247"></a><span class='hs-comment'>-- tf.pow(x, y) ==&gt; [[256, 65536], [9, 27]]</span>
<a name="line-21248"></a><span class='hs-comment'>-- ```</span>
<a name="line-21249"></a><span class='hs-definition'>pow</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-21250"></a>                                               <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-21251"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-21252"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-21253"></a>                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21254"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-21255"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-21256"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-21257"></a><span class='hs-definition'>pow</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21258"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Pow"</span>
<a name="line-21259"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21260"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-21261"></a><span class='hs-comment'>{-
<a name="line-21262"></a>attr {
<a name="line-21263"></a>  allowed_values {
<a name="line-21264"></a>    list {
<a name="line-21265"></a>      type: DT_HALF
<a name="line-21266"></a>      type: DT_FLOAT
<a name="line-21267"></a>      type: DT_DOUBLE
<a name="line-21268"></a>      type: DT_INT32
<a name="line-21269"></a>      type: DT_INT64
<a name="line-21270"></a>      type: DT_COMPLEX64
<a name="line-21271"></a>      type: DT_COMPLEX128
<a name="line-21272"></a>    }
<a name="line-21273"></a>  }
<a name="line-21274"></a>  name: "T"
<a name="line-21275"></a>  type: "type"
<a name="line-21276"></a>}
<a name="line-21277"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-21278"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-21279"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-21280"></a>-}</span>
<a name="line-21281"></a>
<a name="line-21282"></a><a name="loopCond"></a><span class='hs-comment'>-- | Forwards the input to the output.</span>
<a name="line-21283"></a><span class='hs-comment'>--</span>
<a name="line-21284"></a><span class='hs-comment'>-- This operator represents the loop termination condition used by the</span>
<a name="line-21285"></a><span class='hs-comment'>-- "pivot" switches of a loop.</span>
<a name="line-21286"></a><span class='hs-definition'>loopCond</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __input__: A boolean scalar, representing the branch predicate of the Switch op.</span>
<a name="line-21287"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __output__: The same tensor as `input`.</span>
<a name="line-21288"></a><span class='hs-definition'>loopCond</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21289"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LoopCond"</span><span class='hs-layout'>)</span>
<a name="line-21290"></a>        <span class='hs-varid'>input</span>
<a name="line-21291"></a><span class='hs-comment'>{-
<a name="line-21292"></a>input_arg {
<a name="line-21293"></a>  description: "A boolean scalar, representing the branch predicate of the Switch op."
<a name="line-21294"></a>  name: "input"
<a name="line-21295"></a>  type: DT_BOOL
<a name="line-21296"></a>}
<a name="line-21297"></a>output_arg {
<a name="line-21298"></a>  description: "The same tensor as `input`."
<a name="line-21299"></a>  name: "output"
<a name="line-21300"></a>  type: DT_BOOL
<a name="line-21301"></a>}
<a name="line-21302"></a>-}</span>
<a name="line-21303"></a>
<a name="line-21304"></a><span class='hs-comment'>-- | Reads and outputs the entire contents of the input filename.</span>
<a name="line-21305"></a>
<a name="line-21306"></a><a name="readFile"></a><span class='hs-definition'>readFile</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __filename__</span>
<a name="line-21307"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __contents__</span>
<a name="line-21308"></a><span class='hs-definition'>readFile</span> <span class='hs-varid'>filename</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21309"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReadFile"</span><span class='hs-layout'>)</span>
<a name="line-21310"></a>        <span class='hs-varid'>filename</span>
<a name="line-21311"></a><span class='hs-comment'>{-
<a name="line-21312"></a>input_arg { name: "filename" type: DT_STRING }
<a name="line-21313"></a>output_arg { name: "contents" type: DT_STRING }
<a name="line-21314"></a>-}</span>
<a name="line-21315"></a>
<a name="line-21316"></a><a name="imag"></a><span class='hs-comment'>-- | Returns the imaginary part of a complex number.</span>
<a name="line-21317"></a><span class='hs-comment'>--</span>
<a name="line-21318"></a><span class='hs-comment'>-- Given a tensor `input` of complex numbers, this operation returns a tensor of</span>
<a name="line-21319"></a><span class='hs-comment'>-- type `float` that is the imaginary part of each element in `input`. All</span>
<a name="line-21320"></a><span class='hs-comment'>-- elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*</span>
<a name="line-21321"></a><span class='hs-comment'>-- is the real part and *b* is the imaginary part returned by this operation.</span>
<a name="line-21322"></a><span class='hs-comment'>-- </span>
<a name="line-21323"></a><span class='hs-comment'>-- For example:</span>
<a name="line-21324"></a><span class='hs-comment'>-- </span>
<a name="line-21325"></a><span class='hs-comment'>-- ```</span>
<a name="line-21326"></a><span class='hs-comment'>-- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]</span>
<a name="line-21327"></a><span class='hs-comment'>-- tf.imag(input) ==&gt; [4.75, 5.75]</span>
<a name="line-21328"></a><span class='hs-comment'>-- ```</span>
<a name="line-21329"></a><span class='hs-definition'>imag</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tout</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-21330"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-21331"></a>                            <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tout</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21332"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-21333"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tout</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-21334"></a><span class='hs-definition'>imag</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21335"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Imag"</span>
<a name="line-21336"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-21337"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tout"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21338"></a>        <span class='hs-varid'>input</span>
<a name="line-21339"></a><span class='hs-comment'>{-
<a name="line-21340"></a>attr {
<a name="line-21341"></a>  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
<a name="line-21342"></a>  default_value { type: DT_COMPLEX64 }
<a name="line-21343"></a>  name: "T"
<a name="line-21344"></a>  type: "type"
<a name="line-21345"></a>}
<a name="line-21346"></a>attr {
<a name="line-21347"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-21348"></a>  default_value { type: DT_FLOAT }
<a name="line-21349"></a>  name: "Tout"
<a name="line-21350"></a>  type: "type"
<a name="line-21351"></a>}
<a name="line-21352"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-21353"></a>output_arg { name: "output" type_attr: "Tout" }
<a name="line-21354"></a>-}</span>
<a name="line-21355"></a>
<a name="line-21356"></a><span class='hs-comment'>-- | </span>
<a name="line-21357"></a>
<a name="line-21358"></a><a name="tensorArrayGrad"></a><span class='hs-definition'>tensorArrayGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-21359"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__</span>
<a name="line-21360"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __grad_handle__</span>
<a name="line-21361"></a><span class='hs-definition'>tensorArrayGrad</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21362"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayGrad"</span><span class='hs-layout'>)</span>
<a name="line-21363"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span>
<a name="line-21364"></a><span class='hs-comment'>{-
<a name="line-21365"></a>attr { name: "source" type: "string" }
<a name="line-21366"></a>input_arg { name: "handle" type: DT_STRING }
<a name="line-21367"></a>input_arg { name: "flow_in" type: DT_FLOAT }
<a name="line-21368"></a>output_arg { is_ref: true name: "grad_handle" type: DT_STRING }
<a name="line-21369"></a>-}</span>
<a name="line-21370"></a>
<a name="line-21371"></a><a name="histogramSummary"></a><span class='hs-comment'>-- | Outputs a `Summary` protocol buffer with a histogram.</span>
<a name="line-21372"></a><span class='hs-comment'>--</span>
<a name="line-21373"></a><span class='hs-comment'>-- The generated</span>
<a name="line-21374"></a><span class='hs-comment'>-- [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)</span>
<a name="line-21375"></a><span class='hs-comment'>-- has one summary value containing a histogram for `values`.</span>
<a name="line-21376"></a><span class='hs-comment'>-- </span>
<a name="line-21377"></a><span class='hs-comment'>-- This op reports an `InvalidArgument` error if any value is not finite.</span>
<a name="line-21378"></a><span class='hs-definition'>histogramSummary</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-21379"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-21380"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-21381"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-21382"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-21383"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-21384"></a>                                                            <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21385"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __tag__: Scalar.  Tag to use for the `Summary.Value`.</span>
<a name="line-21386"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __values__: Any shape. Values to use to build the histogram.</span>
<a name="line-21387"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.</span>
<a name="line-21388"></a><span class='hs-definition'>histogramSummary</span> <span class='hs-varid'>tag</span> <span class='hs-varid'>values</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21389"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"HistogramSummary"</span>
<a name="line-21390"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21391"></a>        <span class='hs-varid'>tag</span> <span class='hs-varid'>values</span>
<a name="line-21392"></a><span class='hs-comment'>{-
<a name="line-21393"></a>attr {
<a name="line-21394"></a>  allowed_values {
<a name="line-21395"></a>    list {
<a name="line-21396"></a>      type: DT_FLOAT
<a name="line-21397"></a>      type: DT_DOUBLE
<a name="line-21398"></a>      type: DT_INT32
<a name="line-21399"></a>      type: DT_INT64
<a name="line-21400"></a>      type: DT_UINT8
<a name="line-21401"></a>      type: DT_INT16
<a name="line-21402"></a>      type: DT_INT8
<a name="line-21403"></a>      type: DT_UINT16
<a name="line-21404"></a>      type: DT_HALF
<a name="line-21405"></a>    }
<a name="line-21406"></a>  }
<a name="line-21407"></a>  default_value { type: DT_FLOAT }
<a name="line-21408"></a>  name: "T"
<a name="line-21409"></a>  type: "type"
<a name="line-21410"></a>}
<a name="line-21411"></a>input_arg {
<a name="line-21412"></a>  description: "Scalar.  Tag to use for the `Summary.Value`."
<a name="line-21413"></a>  name: "tag"
<a name="line-21414"></a>  type: DT_STRING
<a name="line-21415"></a>}
<a name="line-21416"></a>input_arg {
<a name="line-21417"></a>  description: "Any shape. Values to use to build the histogram."
<a name="line-21418"></a>  name: "values"
<a name="line-21419"></a>  type_attr: "T"
<a name="line-21420"></a>}
<a name="line-21421"></a>output_arg {
<a name="line-21422"></a>  description: "Scalar. Serialized `Summary` protocol buffer."
<a name="line-21423"></a>  name: "summary"
<a name="line-21424"></a>  type: DT_STRING
<a name="line-21425"></a>}
<a name="line-21426"></a>-}</span>
<a name="line-21427"></a>
<a name="line-21428"></a><span class='hs-comment'>-- | Computes the gradients of 3-D convolution with respect to the input.</span>
<a name="line-21429"></a>
<a name="line-21430"></a><a name="conv3DBackpropInputV2"></a><span class='hs-definition'>conv3DBackpropInputV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-21431"></a>                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-21432"></a>                                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-21433"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-21434"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-21435"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-21436"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-21437"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-21438"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-21439"></a>                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21440"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __input_sizes__: An integer vector representing the tensor shape of `input`,</span>
<a name="line-21441"></a>                                                  <span class='hs-comment'>-- where `input` is a 5-D</span>
<a name="line-21442"></a>                                                  <span class='hs-comment'>-- `[batch, depth, rows, cols, in_channels]` tensor.</span>
<a name="line-21443"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.</span>
<a name="line-21444"></a>                                        <span class='hs-comment'>-- `in_channels` must match between `input` and `filter`.</span>
<a name="line-21445"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,</span>
<a name="line-21446"></a>                                        <span class='hs-comment'>-- out_channels]`.</span>
<a name="line-21447"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-21448"></a><span class='hs-definition'>conv3DBackpropInputV2</span> <span class='hs-varid'>input_sizes</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21449"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Conv3DBackpropInputV2"</span>
<a name="line-21450"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21451"></a>        <span class='hs-varid'>input_sizes</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span>
<a name="line-21452"></a><span class='hs-comment'>{-
<a name="line-21453"></a>attr {
<a name="line-21454"></a>  allowed_values {
<a name="line-21455"></a>    list {
<a name="line-21456"></a>      type: DT_FLOAT
<a name="line-21457"></a>      type: DT_DOUBLE
<a name="line-21458"></a>      type: DT_INT64
<a name="line-21459"></a>      type: DT_INT32
<a name="line-21460"></a>      type: DT_UINT8
<a name="line-21461"></a>      type: DT_UINT16
<a name="line-21462"></a>      type: DT_INT16
<a name="line-21463"></a>      type: DT_INT8
<a name="line-21464"></a>      type: DT_COMPLEX64
<a name="line-21465"></a>      type: DT_COMPLEX128
<a name="line-21466"></a>      type: DT_QINT8
<a name="line-21467"></a>      type: DT_QUINT8
<a name="line-21468"></a>      type: DT_QINT32
<a name="line-21469"></a>      type: DT_HALF
<a name="line-21470"></a>    }
<a name="line-21471"></a>  }
<a name="line-21472"></a>  name: "T"
<a name="line-21473"></a>  type: "type"
<a name="line-21474"></a>}
<a name="line-21475"></a>attr {
<a name="line-21476"></a>  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
<a name="line-21477"></a>  has_minimum: true
<a name="line-21478"></a>  minimum: 5
<a name="line-21479"></a>  name: "strides"
<a name="line-21480"></a>  type: "list(int)"
<a name="line-21481"></a>}
<a name="line-21482"></a>attr {
<a name="line-21483"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-21484"></a>  description: "The type of padding algorithm to use."
<a name="line-21485"></a>  name: "padding"
<a name="line-21486"></a>  type: "string"
<a name="line-21487"></a>}
<a name="line-21488"></a>input_arg {
<a name="line-21489"></a>  description: "An integer vector representing the tensor shape of `input`,\nwhere `input` is a 5-D\n`[batch, depth, rows, cols, in_channels]` tensor."
<a name="line-21490"></a>  name: "input_sizes"
<a name="line-21491"></a>  type: DT_INT32
<a name="line-21492"></a>}
<a name="line-21493"></a>input_arg {
<a name="line-21494"></a>  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
<a name="line-21495"></a>  name: "filter"
<a name="line-21496"></a>  type_attr: "T"
<a name="line-21497"></a>}
<a name="line-21498"></a>input_arg {
<a name="line-21499"></a>  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
<a name="line-21500"></a>  name: "out_backprop"
<a name="line-21501"></a>  type_attr: "T"
<a name="line-21502"></a>}
<a name="line-21503"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-21504"></a>-}</span>
<a name="line-21505"></a>
<a name="line-21506"></a><span class='hs-comment'>-- | Computes the gradient of bilinear interpolation.</span>
<a name="line-21507"></a>
<a name="line-21508"></a><a name="resizeBilinearGrad"></a><span class='hs-definition'>resizeBilinearGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-21509"></a>                                                              <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-21510"></a>                                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21511"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __grads__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-21512"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __original_image__: 4-D with shape `[batch, orig_height, orig_width, channels]`,</span>
<a name="line-21513"></a>                                     <span class='hs-comment'>-- The image tensor that was resized.</span>
<a name="line-21514"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`.</span>
<a name="line-21515"></a>                      <span class='hs-comment'>-- Gradients with respect to the input image. Input image must have been</span>
<a name="line-21516"></a>                      <span class='hs-comment'>-- float or double.</span>
<a name="line-21517"></a><span class='hs-definition'>resizeBilinearGrad</span> <span class='hs-varid'>grads</span> <span class='hs-varid'>original_image</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21518"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ResizeBilinearGrad"</span>
<a name="line-21519"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21520"></a>        <span class='hs-varid'>grads</span> <span class='hs-varid'>original_image</span>
<a name="line-21521"></a><span class='hs-comment'>{-
<a name="line-21522"></a>attr {
<a name="line-21523"></a>  allowed_values {
<a name="line-21524"></a>    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
<a name="line-21525"></a>  }
<a name="line-21526"></a>  name: "T"
<a name="line-21527"></a>  type: "type"
<a name="line-21528"></a>}
<a name="line-21529"></a>attr {
<a name="line-21530"></a>  default_value { b: false }
<a name="line-21531"></a>  description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension."
<a name="line-21532"></a>  name: "align_corners"
<a name="line-21533"></a>  type: "bool"
<a name="line-21534"></a>}
<a name="line-21535"></a>input_arg {
<a name="line-21536"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-21537"></a>  name: "grads"
<a name="line-21538"></a>  type: DT_FLOAT
<a name="line-21539"></a>}
<a name="line-21540"></a>input_arg {
<a name="line-21541"></a>  description: "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized."
<a name="line-21542"></a>  name: "original_image"
<a name="line-21543"></a>  type_attr: "T"
<a name="line-21544"></a>}
<a name="line-21545"></a>output_arg {
<a name="line-21546"></a>  description: "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double."
<a name="line-21547"></a>  name: "output"
<a name="line-21548"></a>  type_attr: "T"
<a name="line-21549"></a>}
<a name="line-21550"></a>-}</span>
<a name="line-21551"></a>
<a name="line-21552"></a><a name="addManySparseToTensorsMap"></a><span class='hs-comment'>-- | Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.</span>
<a name="line-21553"></a><span class='hs-comment'>--</span>
<a name="line-21554"></a><span class='hs-comment'>-- A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,</span>
<a name="line-21555"></a><span class='hs-comment'>-- `sparse_values`, and `sparse_shape`, where</span>
<a name="line-21556"></a><span class='hs-comment'>-- </span>
<a name="line-21557"></a><span class='hs-comment'>-- ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```</span>
<a name="line-21558"></a><span class='hs-comment'>-- </span>
<a name="line-21559"></a><span class='hs-comment'>-- An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`</span>
<a name="line-21560"></a><span class='hs-comment'>-- having a first `sparse_indices` column taking values between `[0, N)`, where</span>
<a name="line-21561"></a><span class='hs-comment'>-- the minibatch size `N == sparse_shape[0]`.</span>
<a name="line-21562"></a><span class='hs-comment'>-- </span>
<a name="line-21563"></a><span class='hs-comment'>-- The input `SparseTensor` must have rank `R` greater than 1, and the first</span>
<a name="line-21564"></a><span class='hs-comment'>-- dimension is treated as the minibatch dimension.  Elements of the `SparseTensor`</span>
<a name="line-21565"></a><span class='hs-comment'>-- must be sorted in increasing order of this first dimension.  The stored</span>
<a name="line-21566"></a><span class='hs-comment'>-- `SparseTensor` objects pointed to by each row of the output `sparse_handles`</span>
<a name="line-21567"></a><span class='hs-comment'>-- will have rank `R-1`.</span>
<a name="line-21568"></a><span class='hs-comment'>-- </span>
<a name="line-21569"></a><span class='hs-comment'>-- The `SparseTensor` values can then be read out as part of a minibatch by passing</span>
<a name="line-21570"></a><span class='hs-comment'>-- the given keys as vector elements to `TakeManySparseFromTensorsMap`.  To ensure</span>
<a name="line-21571"></a><span class='hs-comment'>-- the correct `SparseTensorsMap` is accessed, ensure that the same</span>
<a name="line-21572"></a><span class='hs-comment'>-- `container` and `shared_name` are passed to that Op.  If no `shared_name`</span>
<a name="line-21573"></a><span class='hs-comment'>-- is provided here, instead use the *name* of the Operation created by calling</span>
<a name="line-21574"></a><span class='hs-comment'>-- `AddManySparseToTensorsMap` as the `shared_name` passed to</span>
<a name="line-21575"></a><span class='hs-comment'>-- `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.</span>
<a name="line-21576"></a><span class='hs-definition'>addManySparseToTensorsMap</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21577"></a>                             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.</span>
<a name="line-21578"></a>                                                      <span class='hs-comment'>-- `sparse_indices[:, 0]` must be ordered values in `[0, N)`.</span>
<a name="line-21579"></a>                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.</span>
<a name="line-21580"></a>                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.</span>
<a name="line-21581"></a>                                                         <span class='hs-comment'>-- The minibatch size `N == sparse_shape[0]`.</span>
<a name="line-21582"></a>                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __sparse_handles__: 1-D.  The handles of the `SparseTensor` now stored in the</span>
<a name="line-21583"></a>                             <span class='hs-comment'>-- `SparseTensorsMap`.  Shape: `[N]`.</span>
<a name="line-21584"></a><span class='hs-definition'>addManySparseToTensorsMap</span> <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>sparse_values</span>
<a name="line-21585"></a>                          <span class='hs-varid'>sparse_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21586"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AddManySparseToTensorsMap"</span>
<a name="line-21587"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21588"></a>        <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>sparse_values</span> <span class='hs-varid'>sparse_shape</span>
<a name="line-21589"></a><span class='hs-comment'>{-
<a name="line-21590"></a>attr { name: "T" type: "type" }
<a name="line-21591"></a>attr {
<a name="line-21592"></a>  default_value { s: "" }
<a name="line-21593"></a>  description: "The container name for the `SparseTensorsMap` created by this op."
<a name="line-21594"></a>  name: "container"
<a name="line-21595"></a>  type: "string"
<a name="line-21596"></a>}
<a name="line-21597"></a>attr {
<a name="line-21598"></a>  default_value { s: "" }
<a name="line-21599"></a>  description: "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation\'s unique name is used."
<a name="line-21600"></a>  name: "shared_name"
<a name="line-21601"></a>  type: "string"
<a name="line-21602"></a>}
<a name="line-21603"></a>input_arg {
<a name="line-21604"></a>  description: "2-D.  The `indices` of the minibatch `SparseTensor`.\n`sparse_indices[:, 0]` must be ordered values in `[0, N)`."
<a name="line-21605"></a>  name: "sparse_indices"
<a name="line-21606"></a>  type: DT_INT64
<a name="line-21607"></a>}
<a name="line-21608"></a>input_arg {
<a name="line-21609"></a>  description: "1-D.  The `values` of the minibatch `SparseTensor`."
<a name="line-21610"></a>  name: "sparse_values"
<a name="line-21611"></a>  type_attr: "T"
<a name="line-21612"></a>}
<a name="line-21613"></a>input_arg {
<a name="line-21614"></a>  description: "1-D.  The `shape` of the minibatch `SparseTensor`.\nThe minibatch size `N == sparse_shape[0]`."
<a name="line-21615"></a>  name: "sparse_shape"
<a name="line-21616"></a>  type: DT_INT64
<a name="line-21617"></a>}
<a name="line-21618"></a>output_arg {
<a name="line-21619"></a>  description: "1-D.  The handles of the `SparseTensor` now stored in the\n`SparseTensorsMap`.  Shape: `[N]`."
<a name="line-21620"></a>  name: "sparse_handles"
<a name="line-21621"></a>  type: DT_INT64
<a name="line-21622"></a>}
<a name="line-21623"></a>-}</span>
<a name="line-21624"></a>
<a name="line-21625"></a><span class='hs-comment'>-- | </span>
<a name="line-21626"></a>
<a name="line-21627"></a><a name="batchIFFT"></a><span class='hs-definition'>batchIFFT</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-21628"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-21629"></a><span class='hs-definition'>batchIFFT</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21630"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchIFFT"</span><span class='hs-layout'>)</span>
<a name="line-21631"></a>        <span class='hs-varid'>input</span>
<a name="line-21632"></a><span class='hs-comment'>{-
<a name="line-21633"></a>input_arg { name: "input" type: DT_COMPLEX64 }
<a name="line-21634"></a>output_arg { name: "output" type: DT_COMPLEX64 }
<a name="line-21635"></a>-}</span>
<a name="line-21636"></a>
<a name="line-21637"></a><span class='hs-comment'>-- | </span>
<a name="line-21638"></a>
<a name="line-21639"></a><a name="batchMatrixDeterminant"></a><span class='hs-definition'>batchMatrixDeterminant</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-21640"></a>                                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21641"></a>                          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-21642"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-21643"></a><span class='hs-definition'>batchMatrixDeterminant</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21644"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchMatrixDeterminant"</span>
<a name="line-21645"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21646"></a>        <span class='hs-varid'>input</span>
<a name="line-21647"></a><span class='hs-comment'>{-
<a name="line-21648"></a>attr {
<a name="line-21649"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-21650"></a>  name: "T"
<a name="line-21651"></a>  type: "type"
<a name="line-21652"></a>}
<a name="line-21653"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-21654"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-21655"></a>-}</span>
<a name="line-21656"></a>
<a name="line-21657"></a><span class='hs-comment'>-- | Delete the tensor specified by its handle in the session.</span>
<a name="line-21658"></a>
<a name="line-21659"></a><a name="deleteSessionTensor"></a><span class='hs-definition'>deleteSessionTensor</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle for a tensor stored in the session state.</span>
<a name="line-21660"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>ControlNode</span>
<a name="line-21661"></a><span class='hs-definition'>deleteSessionTensor</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21662"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DeleteSessionTensor"</span><span class='hs-layout'>)</span>
<a name="line-21663"></a>        <span class='hs-varid'>handle</span>
<a name="line-21664"></a><span class='hs-comment'>{-
<a name="line-21665"></a>input_arg {
<a name="line-21666"></a>  description: "The handle for a tensor stored in the session state."
<a name="line-21667"></a>  name: "handle"
<a name="line-21668"></a>  type: DT_STRING
<a name="line-21669"></a>}
<a name="line-21670"></a>-}</span>
<a name="line-21671"></a>
<a name="line-21672"></a><span class='hs-comment'>-- | Computes the number of elements in the given table.</span>
<a name="line-21673"></a>
<a name="line-21674"></a><a name="lookupTableSize"></a><span class='hs-definition'>lookupTableSize</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __table_handle__: Handle to the table.</span>
<a name="line-21675"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __size__: Scalar that contains number of elements in the table.</span>
<a name="line-21676"></a><span class='hs-definition'>lookupTableSize</span> <span class='hs-varid'>table_handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21677"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LookupTableSize"</span><span class='hs-layout'>)</span>
<a name="line-21678"></a>        <span class='hs-varid'>table_handle</span>
<a name="line-21679"></a><span class='hs-comment'>{-
<a name="line-21680"></a>input_arg {
<a name="line-21681"></a>  description: "Handle to the table."
<a name="line-21682"></a>  is_ref: true
<a name="line-21683"></a>  name: "table_handle"
<a name="line-21684"></a>  type: DT_STRING
<a name="line-21685"></a>}
<a name="line-21686"></a>output_arg {
<a name="line-21687"></a>  description: "Scalar that contains number of elements in the table."
<a name="line-21688"></a>  name: "size"
<a name="line-21689"></a>  type: DT_INT64
<a name="line-21690"></a>}
<a name="line-21691"></a>-}</span>
<a name="line-21692"></a>
<a name="line-21693"></a><span class='hs-comment'>-- | Computes rectified linear: `max(features, 0)`.</span>
<a name="line-21694"></a>
<a name="line-21695"></a><a name="relu"></a><span class='hs-definition'>relu</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-21696"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-21697"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-21698"></a>                                             <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21699"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__</span>
<a name="line-21700"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __activations__</span>
<a name="line-21701"></a><span class='hs-definition'>relu</span> <span class='hs-varid'>features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21702"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Relu"</span>
<a name="line-21703"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21704"></a>        <span class='hs-varid'>features</span>
<a name="line-21705"></a><span class='hs-comment'>{-
<a name="line-21706"></a>attr {
<a name="line-21707"></a>  allowed_values {
<a name="line-21708"></a>    list {
<a name="line-21709"></a>      type: DT_FLOAT
<a name="line-21710"></a>      type: DT_DOUBLE
<a name="line-21711"></a>      type: DT_INT32
<a name="line-21712"></a>      type: DT_INT64
<a name="line-21713"></a>      type: DT_UINT8
<a name="line-21714"></a>      type: DT_INT16
<a name="line-21715"></a>      type: DT_INT8
<a name="line-21716"></a>      type: DT_UINT16
<a name="line-21717"></a>      type: DT_HALF
<a name="line-21718"></a>    }
<a name="line-21719"></a>  }
<a name="line-21720"></a>  name: "T"
<a name="line-21721"></a>  type: "type"
<a name="line-21722"></a>}
<a name="line-21723"></a>input_arg { name: "features" type_attr: "T" }
<a name="line-21724"></a>output_arg { name: "activations" type_attr: "T" }
<a name="line-21725"></a>-}</span>
<a name="line-21726"></a>
<a name="line-21727"></a><a name="dynamicStitch"></a><span class='hs-comment'>-- | Interleave the values from the `data` tensors into a single tensor.</span>
<a name="line-21728"></a><span class='hs-comment'>--</span>
<a name="line-21729"></a><span class='hs-comment'>-- Builds a merged tensor such that</span>
<a name="line-21730"></a><span class='hs-comment'>-- </span>
<a name="line-21731"></a><span class='hs-comment'>-- ```python</span>
<a name="line-21732"></a><span class='hs-comment'>--     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]</span>
<a name="line-21733"></a><span class='hs-comment'>-- ```</span>
<a name="line-21734"></a><span class='hs-comment'>-- </span>
<a name="line-21735"></a><span class='hs-comment'>-- For example, if each `indices[m]` is scalar or vector, we have</span>
<a name="line-21736"></a><span class='hs-comment'>-- </span>
<a name="line-21737"></a><span class='hs-comment'>-- ```python</span>
<a name="line-21738"></a><span class='hs-comment'>--     # Scalar indices:</span>
<a name="line-21739"></a><span class='hs-comment'>--     merged[indices[m], ...] = data[m][...]</span>
<a name="line-21740"></a><span class='hs-comment'>-- </span>
<a name="line-21741"></a><span class='hs-comment'>--     # Vector indices:</span>
<a name="line-21742"></a><span class='hs-comment'>--     merged[indices[m][i], ...] = data[m][i, ...]</span>
<a name="line-21743"></a><span class='hs-comment'>-- ```</span>
<a name="line-21744"></a><span class='hs-comment'>-- </span>
<a name="line-21745"></a><span class='hs-comment'>-- Each `data[i].shape` must start with the corresponding `indices[i].shape`,</span>
<a name="line-21746"></a><span class='hs-comment'>-- and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we</span>
<a name="line-21747"></a><span class='hs-comment'>-- must have `data[i].shape = indices[i].shape + constant`.  In terms of this</span>
<a name="line-21748"></a><span class='hs-comment'>-- `constant`, the output shape is</span>
<a name="line-21749"></a><span class='hs-comment'>-- </span>
<a name="line-21750"></a><span class='hs-comment'>--     merged.shape = [max(indices)] + constant</span>
<a name="line-21751"></a><span class='hs-comment'>-- </span>
<a name="line-21752"></a><span class='hs-comment'>-- Values are merged in order, so if an index appears in both `indices[m][i]` and</span>
<a name="line-21753"></a><span class='hs-comment'>-- `indices[n][j]` for `(m,i) &lt; (n,j)` the slice `data[n][j]` will appear in the</span>
<a name="line-21754"></a><span class='hs-comment'>-- merged result.</span>
<a name="line-21755"></a><span class='hs-comment'>-- </span>
<a name="line-21756"></a><span class='hs-comment'>-- For example:</span>
<a name="line-21757"></a><span class='hs-comment'>-- </span>
<a name="line-21758"></a><span class='hs-comment'>-- ```python</span>
<a name="line-21759"></a><span class='hs-comment'>--     indices[0] = 6</span>
<a name="line-21760"></a><span class='hs-comment'>--     indices[1] = [4, 1]</span>
<a name="line-21761"></a><span class='hs-comment'>--     indices[2] = [[5, 2], [0, 3]]</span>
<a name="line-21762"></a><span class='hs-comment'>--     data[0] = [61, 62]</span>
<a name="line-21763"></a><span class='hs-comment'>--     data[1] = [[41, 42], [11, 12]]</span>
<a name="line-21764"></a><span class='hs-comment'>--     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]</span>
<a name="line-21765"></a><span class='hs-comment'>--     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],</span>
<a name="line-21766"></a><span class='hs-comment'>--               [51, 52], [61, 62]]</span>
<a name="line-21767"></a><span class='hs-comment'>-- ```</span>
<a name="line-21768"></a><span class='hs-comment'>-- </span>
<a name="line-21769"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-21770"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/DynamicStitch.png" alt&gt;</span>
<a name="line-21771"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-21772"></a><span class='hs-definition'>dynamicStitch</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21773"></a>                 <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __indices__</span>
<a name="line-21774"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-21775"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __merged__</span>
<a name="line-21776"></a><span class='hs-definition'>dynamicStitch</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>data'</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"indices"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>indices</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-21777"></a>                                                    <span class='hs-layout'>(</span><span class='hs-str'>"data"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>data'</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21778"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DynamicStitch"</span>
<a name="line-21779"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-21780"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-21781"></a>        <span class='hs-varid'>indices</span> <span class='hs-varid'>data'</span>
<a name="line-21782"></a>  <span class='hs-keyword'>where</span>
<a name="line-21783"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>indices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-21784"></a><span class='hs-comment'>{-
<a name="line-21785"></a>attr { has_minimum: true minimum: 1 name: "N" type: "int" }
<a name="line-21786"></a>attr { name: "T" type: "type" }
<a name="line-21787"></a>input_arg { name: "indices" number_attr: "N" type: DT_INT32 }
<a name="line-21788"></a>input_arg { name: "data" number_attr: "N" type_attr: "T" }
<a name="line-21789"></a>output_arg { name: "merged" type_attr: "T" }
<a name="line-21790"></a>-}</span>
<a name="line-21791"></a>
<a name="line-21792"></a><a name="lookupTableFind"></a><span class='hs-comment'>-- | Looks up keys in a table, outputs the corresponding values.</span>
<a name="line-21793"></a><span class='hs-comment'>--</span>
<a name="line-21794"></a><span class='hs-comment'>-- The tensor `keys` must of the same type as the keys of the table.</span>
<a name="line-21795"></a><span class='hs-comment'>-- The output `values` is of the type of the table values.</span>
<a name="line-21796"></a><span class='hs-comment'>-- </span>
<a name="line-21797"></a><span class='hs-comment'>-- The scalar `default_value` is the value output for keys not present in the</span>
<a name="line-21798"></a><span class='hs-comment'>-- table. It must also be of the same type as the table values.</span>
<a name="line-21799"></a><span class='hs-definition'>lookupTableFind</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tin</span> <span class='hs-varid'>tout</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tin</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21800"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __table_handle__: Handle to the table.</span>
<a name="line-21801"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tin</span> <span class='hs-comment'>-- ^ __keys__: Any shape.  Keys to look up.</span>
<a name="line-21802"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tout</span> <span class='hs-comment'>-- ^ __default_value__</span>
<a name="line-21803"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __values__: Same shape as `keys`.  Values found in the table, or `default_values`</span>
<a name="line-21804"></a>                   <span class='hs-comment'>-- for missing keys.</span>
<a name="line-21805"></a><span class='hs-definition'>lookupTableFind</span> <span class='hs-varid'>table_handle</span> <span class='hs-varid'>keys</span> <span class='hs-varid'>default_value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21806"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LookupTableFind"</span>
<a name="line-21807"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tin"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tin</span><span class='hs-layout'>)</span>
<a name="line-21808"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tout"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21809"></a>        <span class='hs-varid'>table_handle</span> <span class='hs-varid'>keys</span> <span class='hs-varid'>default_value</span>
<a name="line-21810"></a><span class='hs-comment'>{-
<a name="line-21811"></a>attr { name: "Tin" type: "type" }
<a name="line-21812"></a>attr { name: "Tout" type: "type" }
<a name="line-21813"></a>input_arg {
<a name="line-21814"></a>  description: "Handle to the table."
<a name="line-21815"></a>  is_ref: true
<a name="line-21816"></a>  name: "table_handle"
<a name="line-21817"></a>  type: DT_STRING
<a name="line-21818"></a>}
<a name="line-21819"></a>input_arg {
<a name="line-21820"></a>  description: "Any shape.  Keys to look up."
<a name="line-21821"></a>  name: "keys"
<a name="line-21822"></a>  type_attr: "Tin"
<a name="line-21823"></a>}
<a name="line-21824"></a>input_arg { name: "default_value" type_attr: "Tout" }
<a name="line-21825"></a>output_arg {
<a name="line-21826"></a>  description: "Same shape as `keys`.  Values found in the table, or `default_values`\nfor missing keys."
<a name="line-21827"></a>  name: "values"
<a name="line-21828"></a>  type_attr: "Tout"
<a name="line-21829"></a>}
<a name="line-21830"></a>-}</span>
<a name="line-21831"></a>
<a name="line-21832"></a><a name="sampleDistortedBoundingBox"></a><span class='hs-comment'>-- | Generate a single randomly distorted bounding box for an image.</span>
<a name="line-21833"></a><span class='hs-comment'>--</span>
<a name="line-21834"></a><span class='hs-comment'>-- Bounding box annotations are often supplied in addition to ground-truth labels</span>
<a name="line-21835"></a><span class='hs-comment'>-- in image recognition or object localization tasks. A common technique for</span>
<a name="line-21836"></a><span class='hs-comment'>-- training such a system is to randomly distort an image while preserving</span>
<a name="line-21837"></a><span class='hs-comment'>-- its content, i.e. *data augmentation*. This Op outputs a randomly distorted</span>
<a name="line-21838"></a><span class='hs-comment'>-- localization of an object, i.e. bounding box, given an `image_size`,</span>
<a name="line-21839"></a><span class='hs-comment'>-- `bounding_boxes` and a series of constraints.</span>
<a name="line-21840"></a><span class='hs-comment'>-- </span>
<a name="line-21841"></a><span class='hs-comment'>-- The output of this Op is a single bounding box that may be used to crop the</span>
<a name="line-21842"></a><span class='hs-comment'>-- original image. The output is returned as 3 tensors: `begin`, `size` and</span>
<a name="line-21843"></a><span class='hs-comment'>-- `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the</span>
<a name="line-21844"></a><span class='hs-comment'>-- image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize</span>
<a name="line-21845"></a><span class='hs-comment'>-- what the bounding box looks like.</span>
<a name="line-21846"></a><span class='hs-comment'>-- </span>
<a name="line-21847"></a><span class='hs-comment'>-- Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The</span>
<a name="line-21848"></a><span class='hs-comment'>-- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and</span>
<a name="line-21849"></a><span class='hs-comment'>-- height of the underlying image.</span>
<a name="line-21850"></a><span class='hs-comment'>-- </span>
<a name="line-21851"></a><span class='hs-comment'>-- For example,</span>
<a name="line-21852"></a><span class='hs-comment'>-- </span>
<a name="line-21853"></a><span class='hs-comment'>-- ```python</span>
<a name="line-21854"></a><span class='hs-comment'>--     # Generate a single distorted bounding box.</span>
<a name="line-21855"></a><span class='hs-comment'>--     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(</span>
<a name="line-21856"></a><span class='hs-comment'>--         tf.shape(image),</span>
<a name="line-21857"></a><span class='hs-comment'>--         bounding_boxes=bounding_boxes)</span>
<a name="line-21858"></a><span class='hs-comment'>-- </span>
<a name="line-21859"></a><span class='hs-comment'>--     # Draw the bounding box in an image summary.</span>
<a name="line-21860"></a><span class='hs-comment'>--     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),</span>
<a name="line-21861"></a><span class='hs-comment'>--                                                   bbox_for_draw)</span>
<a name="line-21862"></a><span class='hs-comment'>--     tf.image_summary('images_with_box', image_with_box)</span>
<a name="line-21863"></a><span class='hs-comment'>-- </span>
<a name="line-21864"></a><span class='hs-comment'>--     # Employ the bounding box to distort the image.</span>
<a name="line-21865"></a><span class='hs-comment'>--     distorted_image = tf.slice(image, begin, size)</span>
<a name="line-21866"></a><span class='hs-comment'>-- ```</span>
<a name="line-21867"></a><span class='hs-comment'>-- </span>
<a name="line-21868"></a><span class='hs-comment'>-- Note that if no bounding box information is available, setting</span>
<a name="line-21869"></a><span class='hs-comment'>-- `use_image_if_no_bounding_boxes = true` will assume there is a single implicit</span>
<a name="line-21870"></a><span class='hs-comment'>-- bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is</span>
<a name="line-21871"></a><span class='hs-comment'>-- false and no bounding boxes are supplied, an error is raised.</span>
<a name="line-21872"></a><span class='hs-definition'>sampleDistortedBoundingBox</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-21873"></a>                                                <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-21874"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-21875"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-21876"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-21877"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21878"></a>                              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __image_size__: 1-D, containing `[height, width, channels]`.</span>
<a name="line-21879"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __bounding_boxes__: 3-D with shape `[batch, N, 4]` describing the N bounding boxes</span>
<a name="line-21880"></a>                                                 <span class='hs-comment'>-- associated with the image.</span>
<a name="line-21881"></a>                              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-21882"></a>                                         <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21883"></a>                              <span class='hs-comment'>-- ^ (__begin__, __size__, __bboxes__)</span>
<a name="line-21884"></a>                              <span class='hs-comment'>--</span>
<a name="line-21885"></a>                              <span class='hs-comment'>-- * __begin__: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to</span>
<a name="line-21886"></a>                              <span class='hs-comment'>-- `tf.slice`.</span>
<a name="line-21887"></a>                              <span class='hs-comment'>--</span>
<a name="line-21888"></a>                              <span class='hs-comment'>-- * __size__: 1-D, containing `[target_height, target_width, -1]`. Provide as input to</span>
<a name="line-21889"></a>                              <span class='hs-comment'>-- `tf.slice`.</span>
<a name="line-21890"></a>                              <span class='hs-comment'>--</span>
<a name="line-21891"></a>                              <span class='hs-comment'>-- * __bboxes__: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.</span>
<a name="line-21892"></a>                              <span class='hs-comment'>-- Provide as input to `tf.image.draw_bounding_boxes`.</span>
<a name="line-21893"></a><span class='hs-definition'>sampleDistortedBoundingBox</span> <span class='hs-varid'>image_size</span> <span class='hs-varid'>bounding_boxes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21894"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SampleDistortedBoundingBox"</span>
<a name="line-21895"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-21896"></a>        <span class='hs-varid'>image_size</span> <span class='hs-varid'>bounding_boxes</span>
<a name="line-21897"></a><span class='hs-comment'>{-
<a name="line-21898"></a>attr {
<a name="line-21899"></a>  allowed_values {
<a name="line-21900"></a>    list {
<a name="line-21901"></a>      type: DT_UINT8
<a name="line-21902"></a>      type: DT_INT8
<a name="line-21903"></a>      type: DT_INT16
<a name="line-21904"></a>      type: DT_INT32
<a name="line-21905"></a>      type: DT_INT64
<a name="line-21906"></a>    }
<a name="line-21907"></a>  }
<a name="line-21908"></a>  name: "T"
<a name="line-21909"></a>  type: "type"
<a name="line-21910"></a>}
<a name="line-21911"></a>attr {
<a name="line-21912"></a>  default_value { i: 0 }
<a name="line-21913"></a>  description: "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`.  Otherwise, it is seeded by a random\nseed."
<a name="line-21914"></a>  name: "seed"
<a name="line-21915"></a>  type: "int"
<a name="line-21916"></a>}
<a name="line-21917"></a>attr {
<a name="line-21918"></a>  default_value { i: 0 }
<a name="line-21919"></a>  description: "A second seed to avoid seed collision."
<a name="line-21920"></a>  name: "seed2"
<a name="line-21921"></a>  type: "int"
<a name="line-21922"></a>}
<a name="line-21923"></a>attr {
<a name="line-21924"></a>  default_value { f: 0.1 }
<a name="line-21925"></a>  description: "The cropped area of the image must contain at least this\nfraction of any bounding box supplied."
<a name="line-21926"></a>  name: "min_object_covered"
<a name="line-21927"></a>  type: "float"
<a name="line-21928"></a>}
<a name="line-21929"></a>attr {
<a name="line-21930"></a>  default_value { list { f: 0.75 f: 1.33 } }
<a name="line-21931"></a>  description: "The cropped area of the image must have an aspect ratio =\nwidth / height within this range."
<a name="line-21932"></a>  name: "aspect_ratio_range"
<a name="line-21933"></a>  type: "list(float)"
<a name="line-21934"></a>}
<a name="line-21935"></a>attr {
<a name="line-21936"></a>  default_value { list { f: 5.0e-2 f: 1.0 } }
<a name="line-21937"></a>  description: "The cropped area of the image must contain a fraction of the\nsupplied image within in this range."
<a name="line-21938"></a>  name: "area_range"
<a name="line-21939"></a>  type: "list(float)"
<a name="line-21940"></a>}
<a name="line-21941"></a>attr {
<a name="line-21942"></a>  default_value { i: 100 }
<a name="line-21943"></a>  description: "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage."
<a name="line-21944"></a>  name: "max_attempts"
<a name="line-21945"></a>  type: "int"
<a name="line-21946"></a>}
<a name="line-21947"></a>attr {
<a name="line-21948"></a>  default_value { b: false }
<a name="line-21949"></a>  description: "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error."
<a name="line-21950"></a>  name: "use_image_if_no_bounding_boxes"
<a name="line-21951"></a>  type: "bool"
<a name="line-21952"></a>}
<a name="line-21953"></a>input_arg {
<a name="line-21954"></a>  description: "1-D, containing `[height, width, channels]`."
<a name="line-21955"></a>  name: "image_size"
<a name="line-21956"></a>  type_attr: "T"
<a name="line-21957"></a>}
<a name="line-21958"></a>input_arg {
<a name="line-21959"></a>  description: "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image."
<a name="line-21960"></a>  name: "bounding_boxes"
<a name="line-21961"></a>  type: DT_FLOAT
<a name="line-21962"></a>}
<a name="line-21963"></a>output_arg {
<a name="line-21964"></a>  description: "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`."
<a name="line-21965"></a>  name: "begin"
<a name="line-21966"></a>  type_attr: "T"
<a name="line-21967"></a>}
<a name="line-21968"></a>output_arg {
<a name="line-21969"></a>  description: "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`."
<a name="line-21970"></a>  name: "size"
<a name="line-21971"></a>  type_attr: "T"
<a name="line-21972"></a>}
<a name="line-21973"></a>output_arg {
<a name="line-21974"></a>  description: "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`."
<a name="line-21975"></a>  name: "bboxes"
<a name="line-21976"></a>  type: DT_FLOAT
<a name="line-21977"></a>}
<a name="line-21978"></a>-}</span>
<a name="line-21979"></a>
<a name="line-21980"></a><span class='hs-comment'>-- | Splits a tensor into `num_split` tensors along one dimension.</span>
<a name="line-21981"></a>
<a name="line-21982"></a><a name="splitV"></a><span class='hs-definition'>splitV</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tlen</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tlen</span><span class='hs-layout'>,</span>
<a name="line-21983"></a>                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-21984"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tlen</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-21985"></a>          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_split__</span>
<a name="line-21986"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: The tensor to split.</span>
<a name="line-21987"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tlen</span> <span class='hs-comment'>-- ^ __size_splits__: list containing the sizes of each output tensor along the split</span>
<a name="line-21988"></a>                            <span class='hs-comment'>-- dimension. Must sum to the dimension of value along split_dim.</span>
<a name="line-21989"></a>                            <span class='hs-comment'>-- Can contain one -1 indicating that dimension is to be inferred.</span>
<a name="line-21990"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range</span>
<a name="line-21991"></a>                                      <span class='hs-comment'>-- `[0, rank(value))`.</span>
<a name="line-21992"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __output__: Tensors whose shape matches that of `value`</span>
<a name="line-21993"></a>          <span class='hs-comment'>-- except along `split_dim`, where their sizes are</span>
<a name="line-21994"></a>          <span class='hs-comment'>-- `size_splits[i]`.</span>
<a name="line-21995"></a><span class='hs-definition'>splitV</span> <span class='hs-varid'>num_split</span> <span class='hs-varid'>value</span> <span class='hs-varid'>size_splits</span> <span class='hs-varid'>split_dim</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-21996"></a>    <span class='hs-varid'>buildListOp</span> <span class='hs-keyglyph'>[</span><span class='hs-varid'>num_split</span><span class='hs-keyglyph'>]</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SplitV"</span>
<a name="line-21997"></a>                             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-21998"></a>                             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tlen"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tlen</span><span class='hs-layout'>)</span>
<a name="line-21999"></a>                             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_split"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_split</span><span class='hs-layout'>)</span>
<a name="line-22000"></a>        <span class='hs-varid'>value</span> <span class='hs-varid'>size_splits</span> <span class='hs-varid'>split_dim</span>
<a name="line-22001"></a><span class='hs-comment'>{-
<a name="line-22002"></a>attr { has_minimum: true minimum: 1 name: "num_split" type: "int" }
<a name="line-22003"></a>attr { name: "T" type: "type" }
<a name="line-22004"></a>attr {
<a name="line-22005"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-22006"></a>  default_value { type: DT_INT64 }
<a name="line-22007"></a>  name: "Tlen"
<a name="line-22008"></a>  type: "type"
<a name="line-22009"></a>}
<a name="line-22010"></a>input_arg {
<a name="line-22011"></a>  description: "The tensor to split." name: "value" type_attr: "T"
<a name="line-22012"></a>}
<a name="line-22013"></a>input_arg {
<a name="line-22014"></a>  description: "list containing the sizes of each output tensor along the split\ndimension. Must sum to the dimension of value along split_dim.\nCan contain one -1 indicating that dimension is to be inferred."
<a name="line-22015"></a>  name: "size_splits"
<a name="line-22016"></a>  type_attr: "Tlen"
<a name="line-22017"></a>}
<a name="line-22018"></a>input_arg {
<a name="line-22019"></a>  description: "0-D.  The dimension along which to split.  Must be in the range\n`[0, rank(value))`."
<a name="line-22020"></a>  name: "split_dim"
<a name="line-22021"></a>  type: DT_INT32
<a name="line-22022"></a>}
<a name="line-22023"></a>output_arg {
<a name="line-22024"></a>  description: "Tensors whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`size_splits[i]`."
<a name="line-22025"></a>  name: "output"
<a name="line-22026"></a>  number_attr: "num_split"
<a name="line-22027"></a>  type_attr: "T"
<a name="line-22028"></a>}
<a name="line-22029"></a>-}</span>
<a name="line-22030"></a>
<a name="line-22031"></a><a name="fusedPadConv2D"></a><span class='hs-comment'>-- | Performs a padding as a preprocess during a convolution.</span>
<a name="line-22032"></a><span class='hs-comment'>--</span>
<a name="line-22033"></a><span class='hs-comment'>-- Similar to FusedResizeAndPadConv2d, this op allows for an optimized</span>
<a name="line-22034"></a><span class='hs-comment'>-- implementation where the spatial padding transformation stage is fused with the</span>
<a name="line-22035"></a><span class='hs-comment'>-- im2col lookup, but in this case without the bilinear filtering required for</span>
<a name="line-22036"></a><span class='hs-comment'>-- resizing. Fusing the padding prevents the need to write out the intermediate</span>
<a name="line-22037"></a><span class='hs-comment'>-- results as whole tensors, reducing memory pressure, and we can get some latency</span>
<a name="line-22038"></a><span class='hs-comment'>-- gains by merging the transformation calculations.</span>
<a name="line-22039"></a><span class='hs-comment'>-- The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'</span>
<a name="line-22040"></a><span class='hs-comment'>-- order is used instead.</span>
<a name="line-22041"></a><span class='hs-comment'>-- Internally this op uses a single per-graph scratch buffer, which means that it</span>
<a name="line-22042"></a><span class='hs-comment'>-- will block if multiple versions are being run in parallel. This is because this</span>
<a name="line-22043"></a><span class='hs-comment'>-- operator is primarily an optimization to minimize memory usage.</span>
<a name="line-22044"></a><span class='hs-definition'>fusedPadConv2D</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-22045"></a>                                                             <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-22046"></a>                                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22047"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.</span>
<a name="line-22048"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of</span>
<a name="line-22049"></a>                                              <span class='hs-comment'>-- rows must be the same as the rank of `input`.</span>
<a name="line-22050"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: 4-D with shape</span>
<a name="line-22051"></a>                                 <span class='hs-comment'>-- `[filter_height, filter_width, in_channels, out_channels]`.</span>
<a name="line-22052"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-22053"></a><span class='hs-definition'>fusedPadConv2D</span> <span class='hs-varid'>input</span> <span class='hs-varid'>paddings</span> <span class='hs-varid'>filter</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22054"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FusedPadConv2D"</span>
<a name="line-22055"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22056"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>paddings</span> <span class='hs-varid'>filter</span>
<a name="line-22057"></a><span class='hs-comment'>{-
<a name="line-22058"></a>attr {
<a name="line-22059"></a>  allowed_values {
<a name="line-22060"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-22061"></a>  }
<a name="line-22062"></a>  name: "T"
<a name="line-22063"></a>  type: "type"
<a name="line-22064"></a>}
<a name="line-22065"></a>attr {
<a name="line-22066"></a>  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
<a name="line-22067"></a>  name: "mode"
<a name="line-22068"></a>  type: "string"
<a name="line-22069"></a>}
<a name="line-22070"></a>attr {
<a name="line-22071"></a>  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format."
<a name="line-22072"></a>  name: "strides"
<a name="line-22073"></a>  type: "list(int)"
<a name="line-22074"></a>}
<a name="line-22075"></a>attr {
<a name="line-22076"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-22077"></a>  description: "The type of padding algorithm to use."
<a name="line-22078"></a>  name: "padding"
<a name="line-22079"></a>  type: "string"
<a name="line-22080"></a>}
<a name="line-22081"></a>input_arg {
<a name="line-22082"></a>  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
<a name="line-22083"></a>  name: "input"
<a name="line-22084"></a>  type_attr: "T"
<a name="line-22085"></a>}
<a name="line-22086"></a>input_arg {
<a name="line-22087"></a>  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
<a name="line-22088"></a>  name: "paddings"
<a name="line-22089"></a>  type: DT_INT32
<a name="line-22090"></a>}
<a name="line-22091"></a>input_arg {
<a name="line-22092"></a>  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
<a name="line-22093"></a>  name: "filter"
<a name="line-22094"></a>  type_attr: "T"
<a name="line-22095"></a>}
<a name="line-22096"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-22097"></a>-}</span>
<a name="line-22098"></a>
<a name="line-22099"></a><a name="barrierInsertMany"></a><span class='hs-comment'>-- | For each key, assigns the respective value to the specified component.</span>
<a name="line-22100"></a><span class='hs-comment'>--</span>
<a name="line-22101"></a><span class='hs-comment'>-- If a key is not found in the barrier, this operation will create a new</span>
<a name="line-22102"></a><span class='hs-comment'>-- incomplete element. If a key is found in the barrier, and the element</span>
<a name="line-22103"></a><span class='hs-comment'>-- already has a value at component_index, this operation will fail with</span>
<a name="line-22104"></a><span class='hs-comment'>-- INVALID_ARGUMENT, and leave the barrier in an undefined state.</span>
<a name="line-22105"></a><span class='hs-definition'>barrierInsertMany</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22106"></a>                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __component_index__: The component of the barrier elements that is being assigned.</span>
<a name="line-22107"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a barrier.</span>
<a name="line-22108"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __keys__: A one-dimensional tensor of keys, with length n.</span>
<a name="line-22109"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __values__: An any-dimensional tensor of values, which are associated with the</span>
<a name="line-22110"></a>                                    <span class='hs-comment'>-- respective keys. The 0th dimension must have length n.</span>
<a name="line-22111"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-22112"></a><span class='hs-definition'>barrierInsertMany</span> <span class='hs-varid'>component_index</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>keys</span> <span class='hs-varid'>values</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22113"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BarrierInsertMany"</span>
<a name="line-22114"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-22115"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"component_index"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>component_index</span><span class='hs-layout'>)</span>
<a name="line-22116"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>keys</span> <span class='hs-varid'>values</span>
<a name="line-22117"></a><span class='hs-comment'>{-
<a name="line-22118"></a>attr { name: "T" type: "type" }
<a name="line-22119"></a>attr {
<a name="line-22120"></a>  description: "The component of the barrier elements that is being assigned."
<a name="line-22121"></a>  name: "component_index"
<a name="line-22122"></a>  type: "int"
<a name="line-22123"></a>}
<a name="line-22124"></a>input_arg {
<a name="line-22125"></a>  description: "The handle to a barrier."
<a name="line-22126"></a>  is_ref: true
<a name="line-22127"></a>  name: "handle"
<a name="line-22128"></a>  type: DT_STRING
<a name="line-22129"></a>}
<a name="line-22130"></a>input_arg {
<a name="line-22131"></a>  description: "A one-dimensional tensor of keys, with length n."
<a name="line-22132"></a>  name: "keys"
<a name="line-22133"></a>  type: DT_STRING
<a name="line-22134"></a>}
<a name="line-22135"></a>input_arg {
<a name="line-22136"></a>  description: "An any-dimensional tensor of values, which are associated with the\nrespective keys. The 0th dimension must have length n."
<a name="line-22137"></a>  name: "values"
<a name="line-22138"></a>  type_attr: "T"
<a name="line-22139"></a>}
<a name="line-22140"></a>-}</span>
<a name="line-22141"></a>
<a name="line-22142"></a><a name="abort"></a><span class='hs-comment'>-- | Raise a exception to abort the process when called.</span>
<a name="line-22143"></a><span class='hs-comment'>--</span>
<a name="line-22144"></a><span class='hs-comment'>-- Returns nothing but an exception.</span>
<a name="line-22145"></a><span class='hs-definition'>abort</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>ControlNode</span>
<a name="line-22146"></a><span class='hs-definition'>abort</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22147"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Abort"</span><span class='hs-layout'>)</span>
<a name="line-22148"></a>        
<a name="line-22149"></a><span class='hs-comment'>{-
<a name="line-22150"></a>attr {
<a name="line-22151"></a>  default_value { s: "" }
<a name="line-22152"></a>  description: "A string which is the message associated with the exception."
<a name="line-22153"></a>  name: "error_msg"
<a name="line-22154"></a>  type: "string"
<a name="line-22155"></a>}
<a name="line-22156"></a>-}</span>
<a name="line-22157"></a>
<a name="line-22158"></a><a name="maxPoolWithArgmax"></a><span class='hs-comment'>-- | Performs max pooling on the input and outputs both max values and indices.</span>
<a name="line-22159"></a><span class='hs-comment'>--</span>
<a name="line-22160"></a><span class='hs-comment'>-- The indices in `argmax` are flattened, so that a maximum value at position</span>
<a name="line-22161"></a><span class='hs-comment'>-- `[b, y, x, c]` becomes flattened index</span>
<a name="line-22162"></a><span class='hs-comment'>-- `((b * height + y) * width + x) * channels + c`.</span>
<a name="line-22163"></a><span class='hs-definition'>maxPoolWithArgmax</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>targmax</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>targmax</span><span class='hs-layout'>,</span>
<a name="line-22164"></a>                                            <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-22165"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>targmax</span><span class='hs-layout'>,</span>
<a name="line-22166"></a>                                            <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-22167"></a>                                            <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-22168"></a>                                                    <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22169"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: 4-D with shape `[batch, height, width, channels]`.  Input to pool over.</span>
<a name="line-22170"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>targmax</span><span class='hs-layout'>)</span>
<a name="line-22171"></a>                     <span class='hs-comment'>-- ^ (__output__, __argmax__)</span>
<a name="line-22172"></a>                     <span class='hs-comment'>--</span>
<a name="line-22173"></a>                     <span class='hs-comment'>-- * __output__: The max pooled output tensor.</span>
<a name="line-22174"></a>                     <span class='hs-comment'>--</span>
<a name="line-22175"></a>                     <span class='hs-comment'>-- * __argmax__: 4-D.  The flattened indices of the max values chosen for each output.</span>
<a name="line-22176"></a><span class='hs-definition'>maxPoolWithArgmax</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22177"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MaxPoolWithArgmax"</span>
<a name="line-22178"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Targmax"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>targmax</span><span class='hs-layout'>)</span>
<a name="line-22179"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22180"></a>        <span class='hs-varid'>input</span>
<a name="line-22181"></a><span class='hs-comment'>{-
<a name="line-22182"></a>attr {
<a name="line-22183"></a>  description: "The size of the window for each dimension of the input tensor."
<a name="line-22184"></a>  has_minimum: true
<a name="line-22185"></a>  minimum: 4
<a name="line-22186"></a>  name: "ksize"
<a name="line-22187"></a>  type: "list(int)"
<a name="line-22188"></a>}
<a name="line-22189"></a>attr {
<a name="line-22190"></a>  description: "The stride of the sliding window for each dimension of the\ninput tensor."
<a name="line-22191"></a>  has_minimum: true
<a name="line-22192"></a>  minimum: 4
<a name="line-22193"></a>  name: "strides"
<a name="line-22194"></a>  type: "list(int)"
<a name="line-22195"></a>}
<a name="line-22196"></a>attr {
<a name="line-22197"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-22198"></a>  default_value { type: DT_INT64 }
<a name="line-22199"></a>  name: "Targmax"
<a name="line-22200"></a>  type: "type"
<a name="line-22201"></a>}
<a name="line-22202"></a>attr {
<a name="line-22203"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-22204"></a>  description: "The type of padding algorithm to use."
<a name="line-22205"></a>  name: "padding"
<a name="line-22206"></a>  type: "string"
<a name="line-22207"></a>}
<a name="line-22208"></a>attr {
<a name="line-22209"></a>  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
<a name="line-22210"></a>  default_value { type: DT_FLOAT }
<a name="line-22211"></a>  name: "T"
<a name="line-22212"></a>  type: "type"
<a name="line-22213"></a>}
<a name="line-22214"></a>input_arg {
<a name="line-22215"></a>  description: "4-D with shape `[batch, height, width, channels]`.  Input to pool over."
<a name="line-22216"></a>  name: "input"
<a name="line-22217"></a>  type_attr: "T"
<a name="line-22218"></a>}
<a name="line-22219"></a>output_arg {
<a name="line-22220"></a>  description: "The max pooled output tensor."
<a name="line-22221"></a>  name: "output"
<a name="line-22222"></a>  type_attr: "T"
<a name="line-22223"></a>}
<a name="line-22224"></a>output_arg {
<a name="line-22225"></a>  description: "4-D.  The flattened indices of the max values chosen for each output."
<a name="line-22226"></a>  name: "argmax"
<a name="line-22227"></a>  type_attr: "Targmax"
<a name="line-22228"></a>}
<a name="line-22229"></a>-}</span>
<a name="line-22230"></a>
<a name="line-22231"></a><a name="refEnter"></a><span class='hs-comment'>-- | Creates or finds a child frame, and makes `data` available to the child frame.</span>
<a name="line-22232"></a><span class='hs-comment'>--</span>
<a name="line-22233"></a><span class='hs-comment'>-- The unique `frame_name` is used by the `Executor` to identify frames. If</span>
<a name="line-22234"></a><span class='hs-comment'>-- `is_constant` is true, `output` is a constant in the child frame; otherwise</span>
<a name="line-22235"></a><span class='hs-comment'>-- it may be changed in the child frame. At most `parallel_iterations` iterations</span>
<a name="line-22236"></a><span class='hs-comment'>-- are run in parallel in the child frame.</span>
<a name="line-22237"></a><span class='hs-definition'>refEnter</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22238"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__: The tensor to be made available to the child frame.</span>
<a name="line-22239"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: The same tensor as `data`.</span>
<a name="line-22240"></a><span class='hs-definition'>refEnter</span> <span class='hs-varid'>data'</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22241"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RefEnter"</span>
<a name="line-22242"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22243"></a>        <span class='hs-varid'>data'</span>
<a name="line-22244"></a><span class='hs-comment'>{-
<a name="line-22245"></a>attr { name: "T" type: "type" }
<a name="line-22246"></a>attr {
<a name="line-22247"></a>  description: "The name of the child frame."
<a name="line-22248"></a>  name: "frame_name"
<a name="line-22249"></a>  type: "string"
<a name="line-22250"></a>}
<a name="line-22251"></a>attr {
<a name="line-22252"></a>  default_value { b: false }
<a name="line-22253"></a>  description: "If true, the output is constant within the child frame."
<a name="line-22254"></a>  name: "is_constant"
<a name="line-22255"></a>  type: "bool"
<a name="line-22256"></a>}
<a name="line-22257"></a>attr {
<a name="line-22258"></a>  default_value { i: 10 }
<a name="line-22259"></a>  description: "The number of iterations allowed to run in parallel."
<a name="line-22260"></a>  name: "parallel_iterations"
<a name="line-22261"></a>  type: "int"
<a name="line-22262"></a>}
<a name="line-22263"></a>input_arg {
<a name="line-22264"></a>  description: "The tensor to be made available to the child frame."
<a name="line-22265"></a>  is_ref: true
<a name="line-22266"></a>  name: "data"
<a name="line-22267"></a>  type_attr: "T"
<a name="line-22268"></a>}
<a name="line-22269"></a>output_arg {
<a name="line-22270"></a>  description: "The same tensor as `data`."
<a name="line-22271"></a>  is_ref: true
<a name="line-22272"></a>  name: "output"
<a name="line-22273"></a>  type_attr: "T"
<a name="line-22274"></a>}
<a name="line-22275"></a>-}</span>
<a name="line-22276"></a>
<a name="line-22277"></a><a name="dequantize"></a><span class='hs-comment'>-- | Dequantize the 'input' tensor into a float Tensor.</span>
<a name="line-22278"></a><span class='hs-comment'>--</span>
<a name="line-22279"></a><span class='hs-comment'>-- [min_range, max_range] are scalar floats that specify the range for</span>
<a name="line-22280"></a><span class='hs-comment'>-- the 'input' data. The 'mode' attribute controls exactly which calculations are</span>
<a name="line-22281"></a><span class='hs-comment'>-- used to convert the float values to their quantized equivalents.</span>
<a name="line-22282"></a><span class='hs-comment'>-- </span>
<a name="line-22283"></a><span class='hs-comment'>-- In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:</span>
<a name="line-22284"></a><span class='hs-comment'>-- </span>
<a name="line-22285"></a><span class='hs-comment'>-- ```</span>
<a name="line-22286"></a><span class='hs-comment'>-- if T == qint8, in[i] += (range(T) + 1)/ 2.0</span>
<a name="line-22287"></a><span class='hs-comment'>-- out[i] = min_range + (in[i]* (max_range - min_range) / range(T))</span>
<a name="line-22288"></a><span class='hs-comment'>-- ```</span>
<a name="line-22289"></a><span class='hs-comment'>-- here `range(T) = numeric_limits&lt;T&gt;::max() - numeric_limits&lt;T&gt;::min()`</span>
<a name="line-22290"></a><span class='hs-comment'>-- </span>
<a name="line-22291"></a><span class='hs-comment'>-- *MIN_COMBINED Mode Example*</span>
<a name="line-22292"></a><span class='hs-comment'>-- </span>
<a name="line-22293"></a><span class='hs-comment'>-- If the input comes from a QuantizedRelu6, the output type is</span>
<a name="line-22294"></a><span class='hs-comment'>-- quint8 (range of 0-255) but the possible range of QuantizedRelu6 is</span>
<a name="line-22295"></a><span class='hs-comment'>-- 0-6.  The min_range and max_range values are therefore 0.0 and 6.0.</span>
<a name="line-22296"></a><span class='hs-comment'>-- Dequantize on quint8 will take each value, cast to float, and multiply</span>
<a name="line-22297"></a><span class='hs-comment'>-- by 6 / 255.</span>
<a name="line-22298"></a><span class='hs-comment'>-- Note that if quantizedtype is qint8, the operation will additionally add</span>
<a name="line-22299"></a><span class='hs-comment'>-- each value by 128 prior to casting.</span>
<a name="line-22300"></a><span class='hs-comment'>-- </span>
<a name="line-22301"></a><span class='hs-comment'>-- If the mode is 'MIN_FIRST', then this approach is used:</span>
<a name="line-22302"></a><span class='hs-comment'>-- </span>
<a name="line-22303"></a><span class='hs-comment'>-- ```</span>
<a name="line-22304"></a><span class='hs-comment'>-- number_of_steps = 1 &lt;&lt; (# of bits in T)</span>
<a name="line-22305"></a><span class='hs-comment'>-- range_adjust = number_of_steps / (number_of_steps - 1)</span>
<a name="line-22306"></a><span class='hs-comment'>-- range = (range_max - range_min) * range_adjust</span>
<a name="line-22307"></a><span class='hs-comment'>-- range_scale = range / number_of_steps</span>
<a name="line-22308"></a><span class='hs-comment'>-- const double offset_input = static_cast&lt;double&gt;(input) - lowest_quantized;</span>
<a name="line-22309"></a><span class='hs-comment'>-- result = range_min + ((input - numeric_limits&lt;T&gt;::min()) * range_scale)</span>
<a name="line-22310"></a><span class='hs-comment'>-- ```</span>
<a name="line-22311"></a><span class='hs-definition'>dequantize</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-22312"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-22313"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-22314"></a>                                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22315"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-22316"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_range__: The minimum scalar value possibly produced for the input.</span>
<a name="line-22317"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_range__: The maximum scalar value possibly produced for the input.</span>
<a name="line-22318"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-22319"></a><span class='hs-definition'>dequantize</span> <span class='hs-varid'>input</span> <span class='hs-varid'>min_range</span> <span class='hs-varid'>max_range</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22320"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Dequantize"</span>
<a name="line-22321"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22322"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>min_range</span> <span class='hs-varid'>max_range</span>
<a name="line-22323"></a><span class='hs-comment'>{-
<a name="line-22324"></a>attr {
<a name="line-22325"></a>  allowed_values {
<a name="line-22326"></a>    list {
<a name="line-22327"></a>      type: DT_QINT8
<a name="line-22328"></a>      type: DT_QUINT8
<a name="line-22329"></a>      type: DT_QINT16
<a name="line-22330"></a>      type: DT_QUINT16
<a name="line-22331"></a>      type: DT_QINT32
<a name="line-22332"></a>    }
<a name="line-22333"></a>  }
<a name="line-22334"></a>  name: "T"
<a name="line-22335"></a>  type: "type"
<a name="line-22336"></a>}
<a name="line-22337"></a>attr {
<a name="line-22338"></a>  allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" } }
<a name="line-22339"></a>  default_value { s: "MIN_COMBINED" }
<a name="line-22340"></a>  name: "mode"
<a name="line-22341"></a>  type: "string"
<a name="line-22342"></a>}
<a name="line-22343"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-22344"></a>input_arg {
<a name="line-22345"></a>  description: "The minimum scalar value possibly produced for the input."
<a name="line-22346"></a>  name: "min_range"
<a name="line-22347"></a>  type: DT_FLOAT
<a name="line-22348"></a>}
<a name="line-22349"></a>input_arg {
<a name="line-22350"></a>  description: "The maximum scalar value possibly produced for the input."
<a name="line-22351"></a>  name: "max_range"
<a name="line-22352"></a>  type: DT_FLOAT
<a name="line-22353"></a>}
<a name="line-22354"></a>output_arg { name: "output" type: DT_FLOAT }
<a name="line-22355"></a>-}</span>
<a name="line-22356"></a>
<a name="line-22357"></a><a name="drawBoundingBoxes"></a><span class='hs-comment'>-- | Draw bounding boxes on a batch of images.</span>
<a name="line-22358"></a><span class='hs-comment'>--</span>
<a name="line-22359"></a><span class='hs-comment'>-- Outputs a copy of `images` but draws on top of the pixels zero or more bounding</span>
<a name="line-22360"></a><span class='hs-comment'>-- boxes specified by the locations in `boxes`. The coordinates of the each</span>
<a name="line-22361"></a><span class='hs-comment'>-- bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The</span>
<a name="line-22362"></a><span class='hs-comment'>-- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and</span>
<a name="line-22363"></a><span class='hs-comment'>-- height of the underlying image.</span>
<a name="line-22364"></a><span class='hs-comment'>-- </span>
<a name="line-22365"></a><span class='hs-comment'>-- For example, if an image is 100 x 200 pixels and the bounding box is</span>
<a name="line-22366"></a><span class='hs-comment'>-- `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of the</span>
<a name="line-22367"></a><span class='hs-comment'>-- bounding box will be `(10, 40)` to `(50, 180)`.</span>
<a name="line-22368"></a><span class='hs-comment'>-- </span>
<a name="line-22369"></a><span class='hs-comment'>-- Parts of the bounding box may fall outside the image.</span>
<a name="line-22370"></a><span class='hs-definition'>drawBoundingBoxes</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-22371"></a>                                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22372"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __images__: 4-D with shape `[batch, height, width, depth]`. A batch of images.</span>
<a name="line-22373"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __boxes__: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding</span>
<a name="line-22374"></a>                                        <span class='hs-comment'>-- boxes.</span>
<a name="line-22375"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D with the same shape as `images`. The batch of input images with</span>
<a name="line-22376"></a>                     <span class='hs-comment'>-- bounding boxes drawn on the images.</span>
<a name="line-22377"></a><span class='hs-definition'>drawBoundingBoxes</span> <span class='hs-varid'>images</span> <span class='hs-varid'>boxes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22378"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DrawBoundingBoxes"</span>
<a name="line-22379"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22380"></a>        <span class='hs-varid'>images</span> <span class='hs-varid'>boxes</span>
<a name="line-22381"></a><span class='hs-comment'>{-
<a name="line-22382"></a>attr {
<a name="line-22383"></a>  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
<a name="line-22384"></a>  default_value { type: DT_FLOAT }
<a name="line-22385"></a>  name: "T"
<a name="line-22386"></a>  type: "type"
<a name="line-22387"></a>}
<a name="line-22388"></a>input_arg {
<a name="line-22389"></a>  description: "4-D with shape `[batch, height, width, depth]`. A batch of images."
<a name="line-22390"></a>  name: "images"
<a name="line-22391"></a>  type_attr: "T"
<a name="line-22392"></a>}
<a name="line-22393"></a>input_arg {
<a name="line-22394"></a>  description: "3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding\nboxes."
<a name="line-22395"></a>  name: "boxes"
<a name="line-22396"></a>  type: DT_FLOAT
<a name="line-22397"></a>}
<a name="line-22398"></a>output_arg {
<a name="line-22399"></a>  description: "4-D with the same shape as `images`. The batch of input images with\nbounding boxes drawn on the images."
<a name="line-22400"></a>  name: "output"
<a name="line-22401"></a>  type_attr: "T"
<a name="line-22402"></a>}
<a name="line-22403"></a>-}</span>
<a name="line-22404"></a>
<a name="line-22405"></a><span class='hs-comment'>-- | </span>
<a name="line-22406"></a>
<a name="line-22407"></a><a name="tensorArraySplit"></a><span class='hs-definition'>tensorArraySplit</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22408"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-22409"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__</span>
<a name="line-22410"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __lengths__</span>
<a name="line-22411"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__</span>
<a name="line-22412"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __flow_out__</span>
<a name="line-22413"></a><span class='hs-definition'>tensorArraySplit</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>value</span> <span class='hs-varid'>lengths</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22414"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArraySplit"</span>
<a name="line-22415"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22416"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>value</span> <span class='hs-varid'>lengths</span> <span class='hs-varid'>flow_in</span>
<a name="line-22417"></a><span class='hs-comment'>{-
<a name="line-22418"></a>attr { name: "T" type: "type" }
<a name="line-22419"></a>input_arg { is_ref: true name: "handle" type: DT_STRING }
<a name="line-22420"></a>input_arg { name: "value" type_attr: "T" }
<a name="line-22421"></a>input_arg { name: "lengths" type: DT_INT64 }
<a name="line-22422"></a>input_arg { name: "flow_in" type: DT_FLOAT }
<a name="line-22423"></a>output_arg { name: "flow_out" type: DT_FLOAT }
<a name="line-22424"></a>-}</span>
<a name="line-22425"></a>
<a name="line-22426"></a><a name="stringToHashBucketFast"></a><span class='hs-comment'>-- | Converts each string in the input Tensor to its hash mod by a number of buckets.</span>
<a name="line-22427"></a><span class='hs-comment'>--</span>
<a name="line-22428"></a><span class='hs-comment'>-- The hash function is deterministic on the content of the string within the</span>
<a name="line-22429"></a><span class='hs-comment'>-- process and will never change. However, it is not suitable for cryptography.</span>
<a name="line-22430"></a><span class='hs-comment'>-- This function may be used when CPU time is scarce and inputs are trusted or</span>
<a name="line-22431"></a><span class='hs-comment'>-- unimportant. There is a risk of adversaries constructing inputs that all hash</span>
<a name="line-22432"></a><span class='hs-comment'>-- to the same bucket. To prevent this problem, use a strong hash function with</span>
<a name="line-22433"></a><span class='hs-comment'>-- `tf.string_to_hash_bucket_strong`.</span>
<a name="line-22434"></a><span class='hs-definition'>stringToHashBucketFast</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_buckets__: The number of buckets.</span>
<a name="line-22435"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __input__: The strings to assign a hash bucket.</span>
<a name="line-22436"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __output__: A Tensor of the same shape as the input `string_tensor`.</span>
<a name="line-22437"></a><span class='hs-definition'>stringToHashBucketFast</span> <span class='hs-varid'>num_buckets</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22438"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StringToHashBucketFast"</span>
<a name="line-22439"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_buckets"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_buckets</span><span class='hs-layout'>)</span>
<a name="line-22440"></a>        <span class='hs-varid'>input</span>
<a name="line-22441"></a><span class='hs-comment'>{-
<a name="line-22442"></a>attr {
<a name="line-22443"></a>  description: "The number of buckets."
<a name="line-22444"></a>  has_minimum: true
<a name="line-22445"></a>  minimum: 1
<a name="line-22446"></a>  name: "num_buckets"
<a name="line-22447"></a>  type: "int"
<a name="line-22448"></a>}
<a name="line-22449"></a>input_arg {
<a name="line-22450"></a>  description: "The strings to assign a hash bucket."
<a name="line-22451"></a>  name: "input"
<a name="line-22452"></a>  type: DT_STRING
<a name="line-22453"></a>}
<a name="line-22454"></a>output_arg {
<a name="line-22455"></a>  description: "A Tensor of the same shape as the input `string_tensor`."
<a name="line-22456"></a>  name: "output"
<a name="line-22457"></a>  type: DT_INT64
<a name="line-22458"></a>}
<a name="line-22459"></a>-}</span>
<a name="line-22460"></a>
<a name="line-22461"></a><span class='hs-comment'>-- | </span>
<a name="line-22462"></a>
<a name="line-22463"></a><a name="tensorArrayScatter"></a><span class='hs-definition'>tensorArrayScatter</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22464"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-22465"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __indices__</span>
<a name="line-22466"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__</span>
<a name="line-22467"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__</span>
<a name="line-22468"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __flow_out__</span>
<a name="line-22469"></a><span class='hs-definition'>tensorArrayScatter</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>value</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22470"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayScatter"</span>
<a name="line-22471"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22472"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>value</span> <span class='hs-varid'>flow_in</span>
<a name="line-22473"></a><span class='hs-comment'>{-
<a name="line-22474"></a>attr { name: "T" type: "type" }
<a name="line-22475"></a>input_arg { is_ref: true name: "handle" type: DT_STRING }
<a name="line-22476"></a>input_arg { name: "indices" type: DT_INT32 }
<a name="line-22477"></a>input_arg { name: "value" type_attr: "T" }
<a name="line-22478"></a>input_arg { name: "flow_in" type: DT_FLOAT }
<a name="line-22479"></a>output_arg { name: "flow_out" type: DT_FLOAT }
<a name="line-22480"></a>-}</span>
<a name="line-22481"></a>
<a name="line-22482"></a><a name="oneHot"></a><span class='hs-comment'>-- | Returns a one-hot tensor.</span>
<a name="line-22483"></a><span class='hs-comment'>--</span>
<a name="line-22484"></a><span class='hs-comment'>-- The locations represented by indices in `indices` take value `on_value`,</span>
<a name="line-22485"></a><span class='hs-comment'>-- while all other locations take value `off_value`.</span>
<a name="line-22486"></a><span class='hs-comment'>-- </span>
<a name="line-22487"></a><span class='hs-comment'>-- If the input `indices` is rank `N`, the output will have rank `N+1`,</span>
<a name="line-22488"></a><span class='hs-comment'>-- The new axis is created at dimension `axis` (default: the new axis is</span>
<a name="line-22489"></a><span class='hs-comment'>-- appended at the end).</span>
<a name="line-22490"></a><span class='hs-comment'>-- </span>
<a name="line-22491"></a><span class='hs-comment'>-- If `indices` is a scalar the output shape will be a vector of length `depth`.</span>
<a name="line-22492"></a><span class='hs-comment'>-- </span>
<a name="line-22493"></a><span class='hs-comment'>-- If `indices` is a vector of length `features`, the output shape will be:</span>
<a name="line-22494"></a><span class='hs-comment'>-- ```</span>
<a name="line-22495"></a><span class='hs-comment'>--   features x depth if axis == -1</span>
<a name="line-22496"></a><span class='hs-comment'>--   depth x features if axis == 0</span>
<a name="line-22497"></a><span class='hs-comment'>-- ```</span>
<a name="line-22498"></a><span class='hs-comment'>-- </span>
<a name="line-22499"></a><span class='hs-comment'>-- If `indices` is a matrix (batch) with shape `[batch, features]`,</span>
<a name="line-22500"></a><span class='hs-comment'>-- the output shape will be:</span>
<a name="line-22501"></a><span class='hs-comment'>-- ```</span>
<a name="line-22502"></a><span class='hs-comment'>--   batch x features x depth if axis == -1</span>
<a name="line-22503"></a><span class='hs-comment'>--   batch x depth x features if axis == 1</span>
<a name="line-22504"></a><span class='hs-comment'>--   depth x batch x features if axis == 0</span>
<a name="line-22505"></a><span class='hs-comment'>-- ```</span>
<a name="line-22506"></a><span class='hs-comment'>-- </span>
<a name="line-22507"></a><span class='hs-comment'>-- </span>
<a name="line-22508"></a><span class='hs-comment'>-- Examples</span>
<a name="line-22509"></a><span class='hs-comment'>-- =========</span>
<a name="line-22510"></a><span class='hs-comment'>-- </span>
<a name="line-22511"></a><span class='hs-comment'>-- Suppose that</span>
<a name="line-22512"></a><span class='hs-comment'>-- </span>
<a name="line-22513"></a><span class='hs-comment'>-- ```</span>
<a name="line-22514"></a><span class='hs-comment'>--   indices = [0, 2, -1, 1]</span>
<a name="line-22515"></a><span class='hs-comment'>--   depth = 3</span>
<a name="line-22516"></a><span class='hs-comment'>--   on_value = 5.0</span>
<a name="line-22517"></a><span class='hs-comment'>--   off_value = 0.0</span>
<a name="line-22518"></a><span class='hs-comment'>--   axis = -1</span>
<a name="line-22519"></a><span class='hs-comment'>-- ```</span>
<a name="line-22520"></a><span class='hs-comment'>-- </span>
<a name="line-22521"></a><span class='hs-comment'>-- Then output is `[4 x 3]`:</span>
<a name="line-22522"></a><span class='hs-comment'>-- </span>
<a name="line-22523"></a><span class='hs-comment'>--     ```output =</span>
<a name="line-22524"></a><span class='hs-comment'>--       [5.0 0.0 0.0]  // one_hot(0)</span>
<a name="line-22525"></a><span class='hs-comment'>--       [0.0 0.0 5.0]  // one_hot(2)</span>
<a name="line-22526"></a><span class='hs-comment'>--       [0.0 0.0 0.0]  // one_hot(-1)</span>
<a name="line-22527"></a><span class='hs-comment'>--       [0.0 5.0 0.0]  // one_hot(1)</span>
<a name="line-22528"></a><span class='hs-comment'>--     ```</span>
<a name="line-22529"></a><span class='hs-comment'>-- </span>
<a name="line-22530"></a><span class='hs-comment'>-- Suppose that</span>
<a name="line-22531"></a><span class='hs-comment'>-- </span>
<a name="line-22532"></a><span class='hs-comment'>-- ```</span>
<a name="line-22533"></a><span class='hs-comment'>--   indices = [0, 2, -1, 1]</span>
<a name="line-22534"></a><span class='hs-comment'>--   depth = 3</span>
<a name="line-22535"></a><span class='hs-comment'>--   on_value = 0.0</span>
<a name="line-22536"></a><span class='hs-comment'>--   off_value = 3.0</span>
<a name="line-22537"></a><span class='hs-comment'>--   axis = 0</span>
<a name="line-22538"></a><span class='hs-comment'>-- ```</span>
<a name="line-22539"></a><span class='hs-comment'>-- </span>
<a name="line-22540"></a><span class='hs-comment'>-- Then output is `[3 x 4]`:</span>
<a name="line-22541"></a><span class='hs-comment'>-- </span>
<a name="line-22542"></a><span class='hs-comment'>--     ```output =</span>
<a name="line-22543"></a><span class='hs-comment'>--       [0.0 3.0 3.0 3.0]</span>
<a name="line-22544"></a><span class='hs-comment'>--       [3.0 3.0 3.0 0.0]</span>
<a name="line-22545"></a><span class='hs-comment'>--       [3.0 3.0 3.0 3.0]</span>
<a name="line-22546"></a><span class='hs-comment'>--       [3.0 0.0 3.0 3.0]</span>
<a name="line-22547"></a><span class='hs-comment'>--     //  ^                one_hot(0)</span>
<a name="line-22548"></a><span class='hs-comment'>--     //      ^            one_hot(2)</span>
<a name="line-22549"></a><span class='hs-comment'>--     //          ^        one_hot(-1)</span>
<a name="line-22550"></a><span class='hs-comment'>--     //              ^    one_hot(1)</span>
<a name="line-22551"></a><span class='hs-comment'>--     ```</span>
<a name="line-22552"></a><span class='hs-comment'>-- Suppose that</span>
<a name="line-22553"></a><span class='hs-comment'>-- </span>
<a name="line-22554"></a><span class='hs-comment'>-- ```</span>
<a name="line-22555"></a><span class='hs-comment'>--   indices = [[0, 2], [1, -1]]</span>
<a name="line-22556"></a><span class='hs-comment'>--   depth = 3</span>
<a name="line-22557"></a><span class='hs-comment'>--   on_value = 1.0</span>
<a name="line-22558"></a><span class='hs-comment'>--   off_value = 0.0</span>
<a name="line-22559"></a><span class='hs-comment'>--   axis = -1</span>
<a name="line-22560"></a><span class='hs-comment'>-- ```</span>
<a name="line-22561"></a><span class='hs-comment'>-- </span>
<a name="line-22562"></a><span class='hs-comment'>-- Then output is `[2 x 2 x 3]`:</span>
<a name="line-22563"></a><span class='hs-comment'>-- </span>
<a name="line-22564"></a><span class='hs-comment'>--     ```output =</span>
<a name="line-22565"></a><span class='hs-comment'>--       [</span>
<a name="line-22566"></a><span class='hs-comment'>--         [1.0, 0.0, 0.0]  // one_hot(0)</span>
<a name="line-22567"></a><span class='hs-comment'>--         [0.0, 0.0, 1.0]  // one_hot(2)</span>
<a name="line-22568"></a><span class='hs-comment'>--       ][</span>
<a name="line-22569"></a><span class='hs-comment'>--         [0.0, 1.0, 0.0]  // one_hot(1)</span>
<a name="line-22570"></a><span class='hs-comment'>--         [0.0, 0.0, 0.0]  // one_hot(-1)</span>
<a name="line-22571"></a><span class='hs-comment'>--       ]```</span>
<a name="line-22572"></a><span class='hs-definition'>oneHot</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tI</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tI</span><span class='hs-layout'>,</span>
<a name="line-22573"></a>                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-22574"></a>                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tI</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22575"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tI</span> <span class='hs-comment'>-- ^ __indices__: A tensor of indices.</span>
<a name="line-22576"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __depth__: A scalar defining the depth of the one hot dimension.</span>
<a name="line-22577"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __on_value__: A scalar defining the value to fill in output when `indices[j] = i`.</span>
<a name="line-22578"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __off_value__: A scalar defining the value to fill in output when `indices[j] != i`.</span>
<a name="line-22579"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The one-hot tensor.</span>
<a name="line-22580"></a><span class='hs-definition'>oneHot</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>depth</span> <span class='hs-varid'>on_value</span> <span class='hs-varid'>off_value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22581"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"OneHot"</span>
<a name="line-22582"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-22583"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"TI"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tI</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22584"></a>        <span class='hs-varid'>indices</span> <span class='hs-varid'>depth</span> <span class='hs-varid'>on_value</span> <span class='hs-varid'>off_value</span>
<a name="line-22585"></a><span class='hs-comment'>{-
<a name="line-22586"></a>attr {
<a name="line-22587"></a>  default_value { i: -1 }
<a name="line-22588"></a>  description: "The axis to fill (default: -1, a new inner-most axis)."
<a name="line-22589"></a>  name: "axis"
<a name="line-22590"></a>  type: "int"
<a name="line-22591"></a>}
<a name="line-22592"></a>attr { name: "T" type: "type" }
<a name="line-22593"></a>attr {
<a name="line-22594"></a>  allowed_values {
<a name="line-22595"></a>    list { type: DT_UINT8 type: DT_INT32 type: DT_INT64 }
<a name="line-22596"></a>  }
<a name="line-22597"></a>  default_value { type: DT_INT64 }
<a name="line-22598"></a>  name: "TI"
<a name="line-22599"></a>  type: "type"
<a name="line-22600"></a>}
<a name="line-22601"></a>input_arg {
<a name="line-22602"></a>  description: "A tensor of indices." name: "indices" type_attr: "TI"
<a name="line-22603"></a>}
<a name="line-22604"></a>input_arg {
<a name="line-22605"></a>  description: "A scalar defining the depth of the one hot dimension."
<a name="line-22606"></a>  name: "depth"
<a name="line-22607"></a>  type: DT_INT32
<a name="line-22608"></a>}
<a name="line-22609"></a>input_arg {
<a name="line-22610"></a>  description: "A scalar defining the value to fill in output when `indices[j] = i`."
<a name="line-22611"></a>  name: "on_value"
<a name="line-22612"></a>  type_attr: "T"
<a name="line-22613"></a>}
<a name="line-22614"></a>input_arg {
<a name="line-22615"></a>  description: "A scalar defining the value to fill in output when `indices[j] != i`."
<a name="line-22616"></a>  name: "off_value"
<a name="line-22617"></a>  type_attr: "T"
<a name="line-22618"></a>}
<a name="line-22619"></a>output_arg {
<a name="line-22620"></a>  description: "The one-hot tensor." name: "output" type_attr: "T"
<a name="line-22621"></a>}
<a name="line-22622"></a>-}</span>
<a name="line-22623"></a>
<a name="line-22624"></a><span class='hs-comment'>-- | </span>
<a name="line-22625"></a>
<a name="line-22626"></a><a name="batchIFFT3D"></a><span class='hs-definition'>batchIFFT3D</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-22627"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-22628"></a><span class='hs-definition'>batchIFFT3D</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22629"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchIFFT3D"</span><span class='hs-layout'>)</span>
<a name="line-22630"></a>        <span class='hs-varid'>input</span>
<a name="line-22631"></a><span class='hs-comment'>{-
<a name="line-22632"></a>input_arg { name: "input" type: DT_COMPLEX64 }
<a name="line-22633"></a>output_arg { name: "output" type: DT_COMPLEX64 }
<a name="line-22634"></a>-}</span>
<a name="line-22635"></a>
<a name="line-22636"></a><span class='hs-comment'>-- | Reinterpret the bytes of a string as a vector of numbers.</span>
<a name="line-22637"></a>
<a name="line-22638"></a><a name="decodeRaw"></a><span class='hs-definition'>decodeRaw</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-22639"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-22640"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-22641"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-22642"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-22643"></a>                                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-22644"></a>                                                                <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-22645"></a>                                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22646"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __bytes__: All the elements must have the same length.</span>
<a name="line-22647"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span> <span class='hs-comment'>-- ^ __output__: A Tensor with one more dimension than the input `bytes`.  The</span>
<a name="line-22648"></a>             <span class='hs-comment'>-- added dimension will have size equal to the length of the elements</span>
<a name="line-22649"></a>             <span class='hs-comment'>-- of `bytes` divided by the number of bytes to represent `out_type`.</span>
<a name="line-22650"></a><span class='hs-definition'>decodeRaw</span> <span class='hs-varid'>bytes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22651"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DecodeRaw"</span>
<a name="line-22652"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22653"></a>        <span class='hs-varid'>bytes</span>
<a name="line-22654"></a><span class='hs-comment'>{-
<a name="line-22655"></a>attr {
<a name="line-22656"></a>  allowed_values {
<a name="line-22657"></a>    list {
<a name="line-22658"></a>      type: DT_HALF
<a name="line-22659"></a>      type: DT_FLOAT
<a name="line-22660"></a>      type: DT_DOUBLE
<a name="line-22661"></a>      type: DT_INT32
<a name="line-22662"></a>      type: DT_UINT8
<a name="line-22663"></a>      type: DT_INT16
<a name="line-22664"></a>      type: DT_INT8
<a name="line-22665"></a>      type: DT_INT64
<a name="line-22666"></a>    }
<a name="line-22667"></a>  }
<a name="line-22668"></a>  name: "out_type"
<a name="line-22669"></a>  type: "type"
<a name="line-22670"></a>}
<a name="line-22671"></a>attr {
<a name="line-22672"></a>  default_value { b: true }
<a name="line-22673"></a>  description: "Whether the input `bytes` are in little-endian order.\nIgnored for `out_type` values that are stored in a single byte like\n`uint8`."
<a name="line-22674"></a>  name: "little_endian"
<a name="line-22675"></a>  type: "bool"
<a name="line-22676"></a>}
<a name="line-22677"></a>input_arg {
<a name="line-22678"></a>  description: "All the elements must have the same length."
<a name="line-22679"></a>  name: "bytes"
<a name="line-22680"></a>  type: DT_STRING
<a name="line-22681"></a>}
<a name="line-22682"></a>output_arg {
<a name="line-22683"></a>  description: "A Tensor with one more dimension than the input `bytes`.  The\nadded dimension will have size equal to the length of the elements\nof `bytes` divided by the number of bytes to represent `out_type`."
<a name="line-22684"></a>  name: "output"
<a name="line-22685"></a>  type_attr: "out_type"
<a name="line-22686"></a>}
<a name="line-22687"></a>-}</span>
<a name="line-22688"></a>
<a name="line-22689"></a><span class='hs-comment'>-- | </span>
<a name="line-22690"></a>
<a name="line-22691"></a><a name="tensorArrayPack"></a><span class='hs-definition'>tensorArrayPack</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22692"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-22693"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__</span>
<a name="line-22694"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __value__</span>
<a name="line-22695"></a><span class='hs-definition'>tensorArrayPack</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22696"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayPack"</span>
<a name="line-22697"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22698"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span>
<a name="line-22699"></a><span class='hs-comment'>{-
<a name="line-22700"></a>attr { name: "dtype" type: "type" }
<a name="line-22701"></a>attr {
<a name="line-22702"></a>  default_value { shape { unknown_rank: true } }
<a name="line-22703"></a>  name: "element_shape"
<a name="line-22704"></a>  type: "shape"
<a name="line-22705"></a>}
<a name="line-22706"></a>input_arg { is_ref: true name: "handle" type: DT_STRING }
<a name="line-22707"></a>input_arg { name: "flow_in" type: DT_FLOAT }
<a name="line-22708"></a>output_arg { name: "value" type_attr: "dtype" }
<a name="line-22709"></a>-}</span>
<a name="line-22710"></a>
<a name="line-22711"></a><a name="applyProximalAdagrad"></a><span class='hs-comment'>-- | Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.</span>
<a name="line-22712"></a><span class='hs-comment'>--</span>
<a name="line-22713"></a><span class='hs-comment'>-- accum += grad * grad</span>
<a name="line-22714"></a><span class='hs-comment'>-- prox_v = var - lr * grad * (1 / sqrt(accum))</span>
<a name="line-22715"></a><span class='hs-comment'>-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}</span>
<a name="line-22716"></a><span class='hs-definition'>applyProximalAdagrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-22717"></a>                                                <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-22718"></a>                                                        <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-22719"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-22720"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-22721"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-22722"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-22723"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-22724"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-22725"></a>                                                        <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22726"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-22727"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum__: Should be from a Variable().</span>
<a name="line-22728"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-22729"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l1__: L1 regularization. Must be a scalar.</span>
<a name="line-22730"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l2__: L2 regularization. Must be a scalar.</span>
<a name="line-22731"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-22732"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-22733"></a><span class='hs-definition'>applyProximalAdagrad</span> <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>grad</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22734"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyProximalAdagrad"</span>
<a name="line-22735"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22736"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>grad</span>
<a name="line-22737"></a><span class='hs-comment'>{-
<a name="line-22738"></a>attr {
<a name="line-22739"></a>  allowed_values {
<a name="line-22740"></a>    list {
<a name="line-22741"></a>      type: DT_FLOAT
<a name="line-22742"></a>      type: DT_DOUBLE
<a name="line-22743"></a>      type: DT_INT64
<a name="line-22744"></a>      type: DT_INT32
<a name="line-22745"></a>      type: DT_UINT8
<a name="line-22746"></a>      type: DT_UINT16
<a name="line-22747"></a>      type: DT_INT16
<a name="line-22748"></a>      type: DT_INT8
<a name="line-22749"></a>      type: DT_COMPLEX64
<a name="line-22750"></a>      type: DT_COMPLEX128
<a name="line-22751"></a>      type: DT_QINT8
<a name="line-22752"></a>      type: DT_QUINT8
<a name="line-22753"></a>      type: DT_QINT32
<a name="line-22754"></a>      type: DT_HALF
<a name="line-22755"></a>    }
<a name="line-22756"></a>  }
<a name="line-22757"></a>  name: "T"
<a name="line-22758"></a>  type: "type"
<a name="line-22759"></a>}
<a name="line-22760"></a>attr {
<a name="line-22761"></a>  default_value { b: false }
<a name="line-22762"></a>  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
<a name="line-22763"></a>  name: "use_locking"
<a name="line-22764"></a>  type: "bool"
<a name="line-22765"></a>}
<a name="line-22766"></a>input_arg {
<a name="line-22767"></a>  description: "Should be from a Variable()."
<a name="line-22768"></a>  is_ref: true
<a name="line-22769"></a>  name: "var"
<a name="line-22770"></a>  type_attr: "T"
<a name="line-22771"></a>}
<a name="line-22772"></a>input_arg {
<a name="line-22773"></a>  description: "Should be from a Variable()."
<a name="line-22774"></a>  is_ref: true
<a name="line-22775"></a>  name: "accum"
<a name="line-22776"></a>  type_attr: "T"
<a name="line-22777"></a>}
<a name="line-22778"></a>input_arg {
<a name="line-22779"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-22780"></a>  name: "lr"
<a name="line-22781"></a>  type_attr: "T"
<a name="line-22782"></a>}
<a name="line-22783"></a>input_arg {
<a name="line-22784"></a>  description: "L1 regularization. Must be a scalar."
<a name="line-22785"></a>  name: "l1"
<a name="line-22786"></a>  type_attr: "T"
<a name="line-22787"></a>}
<a name="line-22788"></a>input_arg {
<a name="line-22789"></a>  description: "L2 regularization. Must be a scalar."
<a name="line-22790"></a>  name: "l2"
<a name="line-22791"></a>  type_attr: "T"
<a name="line-22792"></a>}
<a name="line-22793"></a>input_arg {
<a name="line-22794"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-22795"></a>}
<a name="line-22796"></a>output_arg {
<a name="line-22797"></a>  description: "Same as \"var\"."
<a name="line-22798"></a>  is_ref: true
<a name="line-22799"></a>  name: "out"
<a name="line-22800"></a>  type_attr: "T"
<a name="line-22801"></a>}
<a name="line-22802"></a>-}</span>
<a name="line-22803"></a>
<a name="line-22804"></a><a name="sparseAccumulatorApplyGradient"></a><span class='hs-comment'>-- | Applies a sparse gradient to a given accumulator. Does not add if local_step is</span>
<a name="line-22805"></a><span class='hs-comment'>--</span>
<a name="line-22806"></a><span class='hs-comment'>-- lesser than the accumulator's global_step.</span>
<a name="line-22807"></a><span class='hs-definition'>sparseAccumulatorApplyGradient</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-22808"></a>                                                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-22809"></a>                                                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-22810"></a>                                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-22811"></a>                                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-22812"></a>                                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-22813"></a>                                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-22814"></a>                                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-22815"></a>                                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-22816"></a>                                                                      <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-22817"></a>                                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22818"></a>                                  <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __has_known_shape__: Boolean indicating whether gradient_shape is unknown, in which</span>
<a name="line-22819"></a>                                       <span class='hs-comment'>-- case the input is ignored during validation.</span>
<a name="line-22820"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a accumulator.</span>
<a name="line-22821"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __local_step__: The local_step value at which the sparse gradient was computed.</span>
<a name="line-22822"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __gradient_indices__: Indices of the sparse gradient to be accumulated. Must be a</span>
<a name="line-22823"></a>                                                              <span class='hs-comment'>-- vector.</span>
<a name="line-22824"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __gradient_values__: Values are the non-zero slices of the gradient, and must have</span>
<a name="line-22825"></a>                                                     <span class='hs-comment'>-- the same first dimension as indices, i.e., the nnz represented by indices and</span>
<a name="line-22826"></a>                                                     <span class='hs-comment'>-- values must be consistent.</span>
<a name="line-22827"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __gradient_shape__: Shape of the sparse gradient to be accumulated.</span>
<a name="line-22828"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-22829"></a><span class='hs-definition'>sparseAccumulatorApplyGradient</span> <span class='hs-varid'>has_known_shape</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>local_step</span>
<a name="line-22830"></a>                               <span class='hs-varid'>gradient_indices</span> <span class='hs-varid'>gradient_values</span>
<a name="line-22831"></a>                               <span class='hs-varid'>gradient_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22832"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseAccumulatorApplyGradient"</span>
<a name="line-22833"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-22834"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"has_known_shape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>has_known_shape</span><span class='hs-layout'>)</span>
<a name="line-22835"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>local_step</span> <span class='hs-varid'>gradient_indices</span> <span class='hs-varid'>gradient_values</span> <span class='hs-varid'>gradient_shape</span>
<a name="line-22836"></a><span class='hs-comment'>{-
<a name="line-22837"></a>attr {
<a name="line-22838"></a>  allowed_values {
<a name="line-22839"></a>    list {
<a name="line-22840"></a>      type: DT_FLOAT
<a name="line-22841"></a>      type: DT_DOUBLE
<a name="line-22842"></a>      type: DT_INT64
<a name="line-22843"></a>      type: DT_INT32
<a name="line-22844"></a>      type: DT_UINT8
<a name="line-22845"></a>      type: DT_UINT16
<a name="line-22846"></a>      type: DT_INT16
<a name="line-22847"></a>      type: DT_INT8
<a name="line-22848"></a>      type: DT_COMPLEX64
<a name="line-22849"></a>      type: DT_COMPLEX128
<a name="line-22850"></a>      type: DT_QINT8
<a name="line-22851"></a>      type: DT_QUINT8
<a name="line-22852"></a>      type: DT_QINT32
<a name="line-22853"></a>      type: DT_HALF
<a name="line-22854"></a>    }
<a name="line-22855"></a>  }
<a name="line-22856"></a>  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
<a name="line-22857"></a>  name: "dtype"
<a name="line-22858"></a>  type: "type"
<a name="line-22859"></a>}
<a name="line-22860"></a>attr {
<a name="line-22861"></a>  description: "Boolean indicating whether gradient_shape is unknown, in which\ncase the input is ignored during validation."
<a name="line-22862"></a>  name: "has_known_shape"
<a name="line-22863"></a>  type: "bool"
<a name="line-22864"></a>}
<a name="line-22865"></a>input_arg {
<a name="line-22866"></a>  description: "The handle to a accumulator."
<a name="line-22867"></a>  is_ref: true
<a name="line-22868"></a>  name: "handle"
<a name="line-22869"></a>  type: DT_STRING
<a name="line-22870"></a>}
<a name="line-22871"></a>input_arg {
<a name="line-22872"></a>  description: "The local_step value at which the sparse gradient was computed."
<a name="line-22873"></a>  name: "local_step"
<a name="line-22874"></a>  type: DT_INT64
<a name="line-22875"></a>}
<a name="line-22876"></a>input_arg {
<a name="line-22877"></a>  description: "Indices of the sparse gradient to be accumulated. Must be a\nvector."
<a name="line-22878"></a>  name: "gradient_indices"
<a name="line-22879"></a>  type: DT_INT64
<a name="line-22880"></a>}
<a name="line-22881"></a>input_arg {
<a name="line-22882"></a>  description: "Values are the non-zero slices of the gradient, and must have\nthe same first dimension as indices, i.e., the nnz represented by indices and\nvalues must be consistent."
<a name="line-22883"></a>  name: "gradient_values"
<a name="line-22884"></a>  type_attr: "dtype"
<a name="line-22885"></a>}
<a name="line-22886"></a>input_arg {
<a name="line-22887"></a>  description: "Shape of the sparse gradient to be accumulated."
<a name="line-22888"></a>  name: "gradient_shape"
<a name="line-22889"></a>  type: DT_INT64
<a name="line-22890"></a>}
<a name="line-22891"></a>-}</span>
<a name="line-22892"></a>
<a name="line-22893"></a><a name="add"></a><span class='hs-comment'>-- | Returns x + y element-wise.</span>
<a name="line-22894"></a><span class='hs-comment'>--</span>
<a name="line-22895"></a><span class='hs-comment'>-- *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting</span>
<a name="line-22896"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-22897"></a><span class='hs-definition'>add</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-22898"></a>                                               <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-22899"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>,</span>
<a name="line-22900"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-22901"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-22902"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-22903"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-22904"></a>                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22905"></a>       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-22906"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-22907"></a>       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-22908"></a><span class='hs-definition'>add</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22909"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Add"</span>
<a name="line-22910"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22911"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-22912"></a><span class='hs-comment'>{-
<a name="line-22913"></a>attr {
<a name="line-22914"></a>  allowed_values {
<a name="line-22915"></a>    list {
<a name="line-22916"></a>      type: DT_HALF
<a name="line-22917"></a>      type: DT_FLOAT
<a name="line-22918"></a>      type: DT_DOUBLE
<a name="line-22919"></a>      type: DT_UINT8
<a name="line-22920"></a>      type: DT_INT8
<a name="line-22921"></a>      type: DT_INT16
<a name="line-22922"></a>      type: DT_INT32
<a name="line-22923"></a>      type: DT_INT64
<a name="line-22924"></a>      type: DT_COMPLEX64
<a name="line-22925"></a>      type: DT_COMPLEX128
<a name="line-22926"></a>      type: DT_STRING
<a name="line-22927"></a>    }
<a name="line-22928"></a>  }
<a name="line-22929"></a>  name: "T"
<a name="line-22930"></a>  type: "type"
<a name="line-22931"></a>}
<a name="line-22932"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-22933"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-22934"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-22935"></a>-}</span>
<a name="line-22936"></a>
<a name="line-22937"></a><span class='hs-comment'>-- | Computes softsign: `features / (abs(features) + 1)`.</span>
<a name="line-22938"></a>
<a name="line-22939"></a><a name="softsign"></a><span class='hs-definition'>softsign</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-22940"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-22941"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-22942"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-22943"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22944"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __features__</span>
<a name="line-22945"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __activations__</span>
<a name="line-22946"></a><span class='hs-definition'>softsign</span> <span class='hs-varid'>features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22947"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Softsign"</span>
<a name="line-22948"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22949"></a>        <span class='hs-varid'>features</span>
<a name="line-22950"></a><span class='hs-comment'>{-
<a name="line-22951"></a>attr {
<a name="line-22952"></a>  allowed_values {
<a name="line-22953"></a>    list {
<a name="line-22954"></a>      type: DT_FLOAT
<a name="line-22955"></a>      type: DT_DOUBLE
<a name="line-22956"></a>      type: DT_INT32
<a name="line-22957"></a>      type: DT_INT64
<a name="line-22958"></a>      type: DT_UINT8
<a name="line-22959"></a>      type: DT_INT16
<a name="line-22960"></a>      type: DT_INT8
<a name="line-22961"></a>      type: DT_UINT16
<a name="line-22962"></a>      type: DT_HALF
<a name="line-22963"></a>    }
<a name="line-22964"></a>  }
<a name="line-22965"></a>  name: "T"
<a name="line-22966"></a>  type: "type"
<a name="line-22967"></a>}
<a name="line-22968"></a>input_arg { name: "features" type_attr: "T" }
<a name="line-22969"></a>output_arg { name: "activations" type_attr: "T" }
<a name="line-22970"></a>-}</span>
<a name="line-22971"></a>
<a name="line-22972"></a><span class='hs-comment'>-- | </span>
<a name="line-22973"></a>
<a name="line-22974"></a><a name="tensorArrayRead"></a><span class='hs-definition'>tensorArrayRead</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-22975"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-22976"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __index__</span>
<a name="line-22977"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__</span>
<a name="line-22978"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __value__</span>
<a name="line-22979"></a><span class='hs-definition'>tensorArrayRead</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>index</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-22980"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayRead"</span>
<a name="line-22981"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-22982"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>index</span> <span class='hs-varid'>flow_in</span>
<a name="line-22983"></a><span class='hs-comment'>{-
<a name="line-22984"></a>attr { name: "dtype" type: "type" }
<a name="line-22985"></a>input_arg { is_ref: true name: "handle" type: DT_STRING }
<a name="line-22986"></a>input_arg { name: "index" type: DT_INT32 }
<a name="line-22987"></a>input_arg { name: "flow_in" type: DT_FLOAT }
<a name="line-22988"></a>output_arg { name: "value" type_attr: "dtype" }
<a name="line-22989"></a>-}</span>
<a name="line-22990"></a>
<a name="line-22991"></a><a name="scatterNdSub"></a><span class='hs-comment'>-- | Applies sparse subtraction between `updates` and individual values or slices</span>
<a name="line-22992"></a><span class='hs-comment'>--</span>
<a name="line-22993"></a><span class='hs-comment'>-- within a given variable according to `indices`.</span>
<a name="line-22994"></a><span class='hs-comment'>-- </span>
<a name="line-22995"></a><span class='hs-comment'>-- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.</span>
<a name="line-22996"></a><span class='hs-comment'>-- </span>
<a name="line-22997"></a><span class='hs-comment'>-- `indices` must be integer tensor, containing indices into `ref`.</span>
<a name="line-22998"></a><span class='hs-comment'>-- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 &lt; K &lt;= P`.</span>
<a name="line-22999"></a><span class='hs-comment'>-- </span>
<a name="line-23000"></a><span class='hs-comment'>-- The innermost dimension of `indices` (with length `K`) corresponds to</span>
<a name="line-23001"></a><span class='hs-comment'>-- indices into elements (if `K = P`) or slices (if `K &lt; P`) along the `K`th</span>
<a name="line-23002"></a><span class='hs-comment'>-- dimension of `ref`.</span>
<a name="line-23003"></a><span class='hs-comment'>-- </span>
<a name="line-23004"></a><span class='hs-comment'>-- `updates` is `Tensor` of rank `Q-1+P-K` with shape:</span>
<a name="line-23005"></a><span class='hs-comment'>-- </span>
<a name="line-23006"></a><span class='hs-comment'>-- ```</span>
<a name="line-23007"></a><span class='hs-comment'>-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].</span>
<a name="line-23008"></a><span class='hs-comment'>-- ```</span>
<a name="line-23009"></a><span class='hs-comment'>-- </span>
<a name="line-23010"></a><span class='hs-comment'>-- For example, say we want to subtract 4 scattered elements from a rank-1 tensor</span>
<a name="line-23011"></a><span class='hs-comment'>-- with 8 elements. In Python, that subtraction would look like this:</span>
<a name="line-23012"></a><span class='hs-comment'>-- </span>
<a name="line-23013"></a><span class='hs-comment'>--     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])</span>
<a name="line-23014"></a><span class='hs-comment'>--     indices = tf.constant([[4], [3], [1], [7]])</span>
<a name="line-23015"></a><span class='hs-comment'>--     updates = tf.constant([9, 10, 11, 12])</span>
<a name="line-23016"></a><span class='hs-comment'>--     sub = tf.scatter_nd_sub(ref, indices, updates)</span>
<a name="line-23017"></a><span class='hs-comment'>--     with tf.Session() as sess:</span>
<a name="line-23018"></a><span class='hs-comment'>--       print sess.run(sub)</span>
<a name="line-23019"></a><span class='hs-comment'>-- </span>
<a name="line-23020"></a><span class='hs-comment'>-- The resulting update to ref would look like this:</span>
<a name="line-23021"></a><span class='hs-comment'>-- </span>
<a name="line-23022"></a><span class='hs-comment'>--     [1, -9, 3, -6, -4, 6, 7, -4]</span>
<a name="line-23023"></a><span class='hs-comment'>-- </span>
<a name="line-23024"></a><span class='hs-comment'>-- See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to</span>
<a name="line-23025"></a><span class='hs-comment'>-- slices.</span>
<a name="line-23026"></a><span class='hs-definition'>scatterNdSub</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-23027"></a>                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-23028"></a>                                                   <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-23029"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-23030"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23031"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-23032"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-23033"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-23034"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-23035"></a>                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-23036"></a>                                           <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-23037"></a>                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23038"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23039"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: A mutable Tensor. Should be from a Variable node.</span>
<a name="line-23040"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.</span>
<a name="line-23041"></a>                                      <span class='hs-comment'>-- A tensor of indices into ref.</span>
<a name="line-23042"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values</span>
<a name="line-23043"></a>                               <span class='hs-comment'>-- to subtract from ref.</span>
<a name="line-23044"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want</span>
<a name="line-23045"></a>                <span class='hs-comment'>-- to use the updated values after the update is done.</span>
<a name="line-23046"></a><span class='hs-definition'>scatterNdSub</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23047"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ScatterNdSub"</span>
<a name="line-23048"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-23049"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23050"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span>
<a name="line-23051"></a><span class='hs-comment'>{-
<a name="line-23052"></a>attr {
<a name="line-23053"></a>  allowed_values {
<a name="line-23054"></a>    list {
<a name="line-23055"></a>      type: DT_FLOAT
<a name="line-23056"></a>      type: DT_DOUBLE
<a name="line-23057"></a>      type: DT_INT64
<a name="line-23058"></a>      type: DT_INT32
<a name="line-23059"></a>      type: DT_UINT8
<a name="line-23060"></a>      type: DT_UINT16
<a name="line-23061"></a>      type: DT_INT16
<a name="line-23062"></a>      type: DT_INT8
<a name="line-23063"></a>      type: DT_COMPLEX64
<a name="line-23064"></a>      type: DT_COMPLEX128
<a name="line-23065"></a>      type: DT_QINT8
<a name="line-23066"></a>      type: DT_QUINT8
<a name="line-23067"></a>      type: DT_QINT32
<a name="line-23068"></a>      type: DT_HALF
<a name="line-23069"></a>    }
<a name="line-23070"></a>  }
<a name="line-23071"></a>  name: "T"
<a name="line-23072"></a>  type: "type"
<a name="line-23073"></a>}
<a name="line-23074"></a>attr {
<a name="line-23075"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-23076"></a>  name: "Tindices"
<a name="line-23077"></a>  type: "type"
<a name="line-23078"></a>}
<a name="line-23079"></a>attr {
<a name="line-23080"></a>  default_value { b: false }
<a name="line-23081"></a>  description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention."
<a name="line-23082"></a>  name: "use_locking"
<a name="line-23083"></a>  type: "bool"
<a name="line-23084"></a>}
<a name="line-23085"></a>input_arg {
<a name="line-23086"></a>  description: "A mutable Tensor. Should be from a Variable node."
<a name="line-23087"></a>  is_ref: true
<a name="line-23088"></a>  name: "ref"
<a name="line-23089"></a>  type_attr: "T"
<a name="line-23090"></a>}
<a name="line-23091"></a>input_arg {
<a name="line-23092"></a>  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
<a name="line-23093"></a>  name: "indices"
<a name="line-23094"></a>  type_attr: "Tindices"
<a name="line-23095"></a>}
<a name="line-23096"></a>input_arg {
<a name="line-23097"></a>  description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto subtract from ref."
<a name="line-23098"></a>  name: "updates"
<a name="line-23099"></a>  type_attr: "T"
<a name="line-23100"></a>}
<a name="line-23101"></a>output_arg {
<a name="line-23102"></a>  description: "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done."
<a name="line-23103"></a>  is_ref: true
<a name="line-23104"></a>  name: "output_ref"
<a name="line-23105"></a>  type_attr: "T"
<a name="line-23106"></a>}
<a name="line-23107"></a>-}</span>
<a name="line-23108"></a>
<a name="line-23109"></a><a name="restoreSlice"></a><span class='hs-comment'>-- | Restores a tensor from checkpoint files.</span>
<a name="line-23110"></a><span class='hs-comment'>--</span>
<a name="line-23111"></a><span class='hs-comment'>-- This is like `Restore` except that restored tensor can be listed as filling</span>
<a name="line-23112"></a><span class='hs-comment'>-- only a slice of a larger tensor.  `shape_and_slice` specifies the shape of the</span>
<a name="line-23113"></a><span class='hs-comment'>-- larger tensor and the slice that the restored tensor covers.</span>
<a name="line-23114"></a><span class='hs-comment'>-- </span>
<a name="line-23115"></a><span class='hs-comment'>-- The `shape_and_slice` input has the same format as the</span>
<a name="line-23116"></a><span class='hs-comment'>-- elements of the `shapes_and_slices` input of the `SaveSlices` op.</span>
<a name="line-23117"></a><span class='hs-definition'>restoreSlice</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>dt</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dt</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23118"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __file_pattern__: Must have a single element. The pattern of the files from</span>
<a name="line-23119"></a>                                                     <span class='hs-comment'>-- which we read the tensor.</span>
<a name="line-23120"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __tensor_name__: Must have a single element. The name of the tensor to be</span>
<a name="line-23121"></a>                                                        <span class='hs-comment'>-- restored.</span>
<a name="line-23122"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __shape_and_slice__: Scalar. The shapes and slice specifications to use when</span>
<a name="line-23123"></a>                                                        <span class='hs-comment'>-- restoring a tensors.</span>
<a name="line-23124"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dt</span> <span class='hs-comment'>-- ^ __tensor__: The restored tensor.</span>
<a name="line-23125"></a><span class='hs-definition'>restoreSlice</span> <span class='hs-varid'>file_pattern</span> <span class='hs-varid'>tensor_name</span> <span class='hs-varid'>shape_and_slice</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23126"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RestoreSlice"</span>
<a name="line-23127"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dt"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dt</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23128"></a>        <span class='hs-varid'>file_pattern</span> <span class='hs-varid'>tensor_name</span> <span class='hs-varid'>shape_and_slice</span>
<a name="line-23129"></a><span class='hs-comment'>{-
<a name="line-23130"></a>attr {
<a name="line-23131"></a>  description: "The type of the tensor to be restored."
<a name="line-23132"></a>  name: "dt"
<a name="line-23133"></a>  type: "type"
<a name="line-23134"></a>}
<a name="line-23135"></a>attr {
<a name="line-23136"></a>  default_value { i: -1 }
<a name="line-23137"></a>  description: "Index of file to open first if multiple files match\n`file_pattern`. See the documentation for `Restore`."
<a name="line-23138"></a>  name: "preferred_shard"
<a name="line-23139"></a>  type: "int"
<a name="line-23140"></a>}
<a name="line-23141"></a>input_arg {
<a name="line-23142"></a>  description: "Must have a single element. The pattern of the files from\nwhich we read the tensor."
<a name="line-23143"></a>  name: "file_pattern"
<a name="line-23144"></a>  type: DT_STRING
<a name="line-23145"></a>}
<a name="line-23146"></a>input_arg {
<a name="line-23147"></a>  description: "Must have a single element. The name of the tensor to be\nrestored."
<a name="line-23148"></a>  name: "tensor_name"
<a name="line-23149"></a>  type: DT_STRING
<a name="line-23150"></a>}
<a name="line-23151"></a>input_arg {
<a name="line-23152"></a>  description: "Scalar. The shapes and slice specifications to use when\nrestoring a tensors."
<a name="line-23153"></a>  name: "shape_and_slice"
<a name="line-23154"></a>  type: DT_STRING
<a name="line-23155"></a>}
<a name="line-23156"></a>output_arg {
<a name="line-23157"></a>  description: "The restored tensor." name: "tensor" type_attr: "dt"
<a name="line-23158"></a>}
<a name="line-23159"></a>-}</span>
<a name="line-23160"></a>
<a name="line-23161"></a><a name="assignAdd"></a><span class='hs-comment'>-- | Update 'ref' by adding 'value' to it.</span>
<a name="line-23162"></a><span class='hs-comment'>--</span>
<a name="line-23163"></a><span class='hs-comment'>-- This operation outputs "ref" after the update is done.</span>
<a name="line-23164"></a><span class='hs-comment'>-- This makes it easier to chain operations that need to use the reset value.</span>
<a name="line-23165"></a><span class='hs-definition'>assignAdd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-23166"></a>                                                  <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-23167"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-23168"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23169"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-23170"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-23171"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-23172"></a>                                                  <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23173"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: Should be from a `Variable` node.</span>
<a name="line-23174"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: The value to be added to the variable.</span>
<a name="line-23175"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want</span>
<a name="line-23176"></a>             <span class='hs-comment'>-- to use the new value after the variable has been updated.</span>
<a name="line-23177"></a><span class='hs-definition'>assignAdd</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23178"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AssignAdd"</span>
<a name="line-23179"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23180"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>value</span>
<a name="line-23181"></a><span class='hs-comment'>{-
<a name="line-23182"></a>attr {
<a name="line-23183"></a>  allowed_values {
<a name="line-23184"></a>    list {
<a name="line-23185"></a>      type: DT_FLOAT
<a name="line-23186"></a>      type: DT_DOUBLE
<a name="line-23187"></a>      type: DT_INT64
<a name="line-23188"></a>      type: DT_INT32
<a name="line-23189"></a>      type: DT_UINT8
<a name="line-23190"></a>      type: DT_UINT16
<a name="line-23191"></a>      type: DT_INT16
<a name="line-23192"></a>      type: DT_INT8
<a name="line-23193"></a>      type: DT_COMPLEX64
<a name="line-23194"></a>      type: DT_COMPLEX128
<a name="line-23195"></a>      type: DT_QINT8
<a name="line-23196"></a>      type: DT_QUINT8
<a name="line-23197"></a>      type: DT_QINT32
<a name="line-23198"></a>      type: DT_HALF
<a name="line-23199"></a>    }
<a name="line-23200"></a>  }
<a name="line-23201"></a>  name: "T"
<a name="line-23202"></a>  type: "type"
<a name="line-23203"></a>}
<a name="line-23204"></a>attr {
<a name="line-23205"></a>  default_value { b: false }
<a name="line-23206"></a>  description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-23207"></a>  name: "use_locking"
<a name="line-23208"></a>  type: "bool"
<a name="line-23209"></a>}
<a name="line-23210"></a>input_arg {
<a name="line-23211"></a>  description: "Should be from a `Variable` node."
<a name="line-23212"></a>  is_ref: true
<a name="line-23213"></a>  name: "ref"
<a name="line-23214"></a>  type_attr: "T"
<a name="line-23215"></a>}
<a name="line-23216"></a>input_arg {
<a name="line-23217"></a>  description: "The value to be added to the variable."
<a name="line-23218"></a>  name: "value"
<a name="line-23219"></a>  type_attr: "T"
<a name="line-23220"></a>}
<a name="line-23221"></a>output_arg {
<a name="line-23222"></a>  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been updated."
<a name="line-23223"></a>  is_ref: true
<a name="line-23224"></a>  name: "output_ref"
<a name="line-23225"></a>  type_attr: "T"
<a name="line-23226"></a>}
<a name="line-23227"></a>-}</span>
<a name="line-23228"></a>
<a name="line-23229"></a><a name="greater"></a><span class='hs-comment'>-- | Returns the truth value of (x &gt; y) element-wise.</span>
<a name="line-23230"></a><span class='hs-comment'>--</span>
<a name="line-23231"></a><span class='hs-comment'>-- *NOTE*: `Greater` supports broadcasting. More about broadcasting</span>
<a name="line-23232"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-23233"></a><span class='hs-definition'>greater</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-23234"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23235"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-23236"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-23237"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-23238"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-23239"></a>                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23240"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-23241"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-23242"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-23243"></a><span class='hs-definition'>greater</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23244"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Greater"</span>
<a name="line-23245"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23246"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-23247"></a><span class='hs-comment'>{-
<a name="line-23248"></a>attr {
<a name="line-23249"></a>  allowed_values {
<a name="line-23250"></a>    list {
<a name="line-23251"></a>      type: DT_FLOAT
<a name="line-23252"></a>      type: DT_DOUBLE
<a name="line-23253"></a>      type: DT_INT32
<a name="line-23254"></a>      type: DT_INT64
<a name="line-23255"></a>      type: DT_UINT8
<a name="line-23256"></a>      type: DT_INT16
<a name="line-23257"></a>      type: DT_INT8
<a name="line-23258"></a>      type: DT_UINT16
<a name="line-23259"></a>      type: DT_HALF
<a name="line-23260"></a>    }
<a name="line-23261"></a>  }
<a name="line-23262"></a>  name: "T"
<a name="line-23263"></a>  type: "type"
<a name="line-23264"></a>}
<a name="line-23265"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-23266"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-23267"></a>output_arg { name: "z" type: DT_BOOL }
<a name="line-23268"></a>-}</span>
<a name="line-23269"></a>
<a name="line-23270"></a><span class='hs-comment'>-- | Returns the number of work units this Reader has finished processing.</span>
<a name="line-23271"></a>
<a name="line-23272"></a><a name="readerNumWorkUnitsCompleted"></a><span class='hs-definition'>readerNumWorkUnitsCompleted</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __reader_handle__: Handle to a Reader.</span>
<a name="line-23273"></a>                               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __units_completed__</span>
<a name="line-23274"></a><span class='hs-definition'>readerNumWorkUnitsCompleted</span> <span class='hs-varid'>reader_handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23275"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReaderNumWorkUnitsCompleted"</span><span class='hs-layout'>)</span>
<a name="line-23276"></a>        <span class='hs-varid'>reader_handle</span>
<a name="line-23277"></a><span class='hs-comment'>{-
<a name="line-23278"></a>input_arg {
<a name="line-23279"></a>  description: "Handle to a Reader."
<a name="line-23280"></a>  is_ref: true
<a name="line-23281"></a>  name: "reader_handle"
<a name="line-23282"></a>  type: DT_STRING
<a name="line-23283"></a>}
<a name="line-23284"></a>output_arg { name: "units_completed" type: DT_INT64 }
<a name="line-23285"></a>-}</span>
<a name="line-23286"></a>
<a name="line-23287"></a><a name="tensorArrayGatherV2"></a><span class='hs-comment'>-- | Gather specific elements from the TensorArray into output `value`.</span>
<a name="line-23288"></a><span class='hs-comment'>--</span>
<a name="line-23289"></a><span class='hs-comment'>-- All elements selected by `indices` must have the same shape.</span>
<a name="line-23290"></a><span class='hs-definition'>tensorArrayGatherV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23291"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a TensorArray.</span>
<a name="line-23292"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __indices__: The locations in the TensorArray from which to read tensor elements.</span>
<a name="line-23293"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-23294"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __value__: All of the elements in the TensorArray, concatenated along a new</span>
<a name="line-23295"></a>                       <span class='hs-comment'>-- axis (the new dimension 0).</span>
<a name="line-23296"></a><span class='hs-definition'>tensorArrayGatherV2</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23297"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayGatherV2"</span>
<a name="line-23298"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23299"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>flow_in</span>
<a name="line-23300"></a><span class='hs-comment'>{-
<a name="line-23301"></a>attr {
<a name="line-23302"></a>  description: "The type of the elem that is returned."
<a name="line-23303"></a>  name: "dtype"
<a name="line-23304"></a>  type: "type"
<a name="line-23305"></a>}
<a name="line-23306"></a>attr {
<a name="line-23307"></a>  default_value { shape { unknown_rank: true } }
<a name="line-23308"></a>  description: "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error."
<a name="line-23309"></a>  name: "element_shape"
<a name="line-23310"></a>  type: "shape"
<a name="line-23311"></a>}
<a name="line-23312"></a>input_arg {
<a name="line-23313"></a>  description: "The handle to a TensorArray."
<a name="line-23314"></a>  name: "handle"
<a name="line-23315"></a>  type: DT_STRING
<a name="line-23316"></a>}
<a name="line-23317"></a>input_arg {
<a name="line-23318"></a>  description: "The locations in the TensorArray from which to read tensor elements."
<a name="line-23319"></a>  name: "indices"
<a name="line-23320"></a>  type: DT_INT32
<a name="line-23321"></a>}
<a name="line-23322"></a>input_arg {
<a name="line-23323"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-23324"></a>  name: "flow_in"
<a name="line-23325"></a>  type: DT_FLOAT
<a name="line-23326"></a>}
<a name="line-23327"></a>output_arg {
<a name="line-23328"></a>  description: "All of the elements in the TensorArray, concatenated along a new\naxis (the new dimension 0)."
<a name="line-23329"></a>  name: "value"
<a name="line-23330"></a>  type_attr: "dtype"
<a name="line-23331"></a>}
<a name="line-23332"></a>-}</span>
<a name="line-23333"></a>
<a name="line-23334"></a><span class='hs-comment'>-- | Read an element from the TensorArray into output `value`.</span>
<a name="line-23335"></a>
<a name="line-23336"></a><a name="tensorArrayReadV2"></a><span class='hs-definition'>tensorArrayReadV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23337"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a TensorArray.</span>
<a name="line-23338"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __index__</span>
<a name="line-23339"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-23340"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __value__: The tensor that is read from the TensorArray.</span>
<a name="line-23341"></a><span class='hs-definition'>tensorArrayReadV2</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>index</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23342"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayReadV2"</span>
<a name="line-23343"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23344"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>index</span> <span class='hs-varid'>flow_in</span>
<a name="line-23345"></a><span class='hs-comment'>{-
<a name="line-23346"></a>attr {
<a name="line-23347"></a>  description: "The type of the elem that is returned."
<a name="line-23348"></a>  name: "dtype"
<a name="line-23349"></a>  type: "type"
<a name="line-23350"></a>}
<a name="line-23351"></a>input_arg {
<a name="line-23352"></a>  description: "The handle to a TensorArray."
<a name="line-23353"></a>  name: "handle"
<a name="line-23354"></a>  type: DT_STRING
<a name="line-23355"></a>}
<a name="line-23356"></a>input_arg { name: "index" type: DT_INT32 }
<a name="line-23357"></a>input_arg {
<a name="line-23358"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-23359"></a>  name: "flow_in"
<a name="line-23360"></a>  type: DT_FLOAT
<a name="line-23361"></a>}
<a name="line-23362"></a>output_arg {
<a name="line-23363"></a>  description: "The tensor that is read from the TensorArray."
<a name="line-23364"></a>  name: "value"
<a name="line-23365"></a>  type_attr: "dtype"
<a name="line-23366"></a>}
<a name="line-23367"></a>-}</span>
<a name="line-23368"></a>
<a name="line-23369"></a><a name="decodeBase64"></a><span class='hs-comment'>-- | Decode web-safe base64-encoded strings.</span>
<a name="line-23370"></a><span class='hs-comment'>--</span>
<a name="line-23371"></a><span class='hs-comment'>-- Input may or may not have padding at the end. See EncodeBase64 for padding.</span>
<a name="line-23372"></a><span class='hs-comment'>-- Web-safe means that input must use - and _ instead of + and /.</span>
<a name="line-23373"></a><span class='hs-definition'>decodeBase64</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __input__: Base64 strings to decode.</span>
<a name="line-23374"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __output__: Decoded strings.</span>
<a name="line-23375"></a><span class='hs-definition'>decodeBase64</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23376"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DecodeBase64"</span><span class='hs-layout'>)</span>
<a name="line-23377"></a>        <span class='hs-varid'>input</span>
<a name="line-23378"></a><span class='hs-comment'>{-
<a name="line-23379"></a>input_arg {
<a name="line-23380"></a>  description: "Base64 strings to decode."
<a name="line-23381"></a>  name: "input"
<a name="line-23382"></a>  type: DT_STRING
<a name="line-23383"></a>}
<a name="line-23384"></a>output_arg {
<a name="line-23385"></a>  description: "Decoded strings." name: "output" type: DT_STRING
<a name="line-23386"></a>}
<a name="line-23387"></a>-}</span>
<a name="line-23388"></a>
<a name="line-23389"></a><span class='hs-comment'>-- | Push an element onto the tensor_array.</span>
<a name="line-23390"></a>
<a name="line-23391"></a><a name="tensorArrayWriteV2"></a><span class='hs-definition'>tensorArrayWriteV2</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23392"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a TensorArray.</span>
<a name="line-23393"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __index__: The position to write to inside the TensorArray.</span>
<a name="line-23394"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: The tensor to write to the TensorArray.</span>
<a name="line-23395"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-23396"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_out__: A float scalar that enforces proper chaining of operations.</span>
<a name="line-23397"></a><span class='hs-definition'>tensorArrayWriteV2</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>index</span> <span class='hs-varid'>value</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23398"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayWriteV2"</span>
<a name="line-23399"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23400"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>index</span> <span class='hs-varid'>value</span> <span class='hs-varid'>flow_in</span>
<a name="line-23401"></a><span class='hs-comment'>{-
<a name="line-23402"></a>attr { name: "T" type: "type" }
<a name="line-23403"></a>input_arg {
<a name="line-23404"></a>  description: "The handle to a TensorArray."
<a name="line-23405"></a>  name: "handle"
<a name="line-23406"></a>  type: DT_STRING
<a name="line-23407"></a>}
<a name="line-23408"></a>input_arg {
<a name="line-23409"></a>  description: "The position to write to inside the TensorArray."
<a name="line-23410"></a>  name: "index"
<a name="line-23411"></a>  type: DT_INT32
<a name="line-23412"></a>}
<a name="line-23413"></a>input_arg {
<a name="line-23414"></a>  description: "The tensor to write to the TensorArray."
<a name="line-23415"></a>  name: "value"
<a name="line-23416"></a>  type_attr: "T"
<a name="line-23417"></a>}
<a name="line-23418"></a>input_arg {
<a name="line-23419"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-23420"></a>  name: "flow_in"
<a name="line-23421"></a>  type: DT_FLOAT
<a name="line-23422"></a>}
<a name="line-23423"></a>output_arg {
<a name="line-23424"></a>  description: "A float scalar that enforces proper chaining of operations."
<a name="line-23425"></a>  name: "flow_out"
<a name="line-23426"></a>  type: DT_FLOAT
<a name="line-23427"></a>}
<a name="line-23428"></a>-}</span>
<a name="line-23429"></a>
<a name="line-23430"></a><a name="audioSummary"></a><span class='hs-comment'>-- | Outputs a `Summary` protocol buffer with audio.</span>
<a name="line-23431"></a><span class='hs-comment'>--</span>
<a name="line-23432"></a><span class='hs-comment'>-- The summary has up to `max_outputs` summary values containing audio. The</span>
<a name="line-23433"></a><span class='hs-comment'>-- audio is built from `tensor` which must be 3-D with shape `[batch_size,</span>
<a name="line-23434"></a><span class='hs-comment'>-- frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are</span>
<a name="line-23435"></a><span class='hs-comment'>-- assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.</span>
<a name="line-23436"></a><span class='hs-comment'>-- </span>
<a name="line-23437"></a><span class='hs-comment'>-- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to</span>
<a name="line-23438"></a><span class='hs-comment'>-- build the `tag` of the summary values:</span>
<a name="line-23439"></a><span class='hs-comment'>-- </span>
<a name="line-23440"></a><span class='hs-comment'>-- *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.</span>
<a name="line-23441"></a><span class='hs-comment'>-- *  If `max_outputs` is greater than 1, the summary value tags are</span>
<a name="line-23442"></a><span class='hs-comment'>--    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.</span>
<a name="line-23443"></a><span class='hs-definition'>audioSummary</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __sample_rate__: The sample rate of the signal in hertz.</span>
<a name="line-23444"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.</span>
<a name="line-23445"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __tensor__: 2-D of shape `[batch_size, frames]`.</span>
<a name="line-23446"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.</span>
<a name="line-23447"></a><span class='hs-definition'>audioSummary</span> <span class='hs-varid'>sample_rate</span> <span class='hs-varid'>tag</span> <span class='hs-varid'>tensor</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23448"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AudioSummary"</span>
<a name="line-23449"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"sample_rate"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>sample_rate</span><span class='hs-layout'>)</span>
<a name="line-23450"></a>        <span class='hs-varid'>tag</span> <span class='hs-varid'>tensor</span>
<a name="line-23451"></a><span class='hs-comment'>{-
<a name="line-23452"></a>attr {
<a name="line-23453"></a>  description: "The sample rate of the signal in hertz."
<a name="line-23454"></a>  name: "sample_rate"
<a name="line-23455"></a>  type: "float"
<a name="line-23456"></a>}
<a name="line-23457"></a>attr {
<a name="line-23458"></a>  default_value { i: 3 }
<a name="line-23459"></a>  description: "Max number of batch elements to generate audio for."
<a name="line-23460"></a>  has_minimum: true
<a name="line-23461"></a>  minimum: 1
<a name="line-23462"></a>  name: "max_outputs"
<a name="line-23463"></a>  type: "int"
<a name="line-23464"></a>}
<a name="line-23465"></a>input_arg {
<a name="line-23466"></a>  description: "Scalar. Used to build the `tag` attribute of the summary values."
<a name="line-23467"></a>  name: "tag"
<a name="line-23468"></a>  type: DT_STRING
<a name="line-23469"></a>}
<a name="line-23470"></a>input_arg {
<a name="line-23471"></a>  description: "2-D of shape `[batch_size, frames]`."
<a name="line-23472"></a>  name: "tensor"
<a name="line-23473"></a>  type: DT_FLOAT
<a name="line-23474"></a>}
<a name="line-23475"></a>output_arg {
<a name="line-23476"></a>  description: "Scalar. Serialized `Summary` protocol buffer."
<a name="line-23477"></a>  name: "summary"
<a name="line-23478"></a>  type: DT_STRING
<a name="line-23479"></a>}
<a name="line-23480"></a>-}</span>
<a name="line-23481"></a>
<a name="line-23482"></a><a name="isFinite"></a><span class='hs-comment'>-- | Returns which elements of x are finite.</span>
<a name="line-23483"></a><span class='hs-comment'>--</span>
<a name="line-23484"></a><span class='hs-comment'>-- @compatibility(numpy)</span>
<a name="line-23485"></a><span class='hs-comment'>-- Equivalent to np.isfinite</span>
<a name="line-23486"></a><span class='hs-comment'>-- @end_compatibility</span>
<a name="line-23487"></a><span class='hs-definition'>isFinite</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-23488"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23489"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-23490"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-23491"></a><span class='hs-definition'>isFinite</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23492"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"IsFinite"</span>
<a name="line-23493"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23494"></a>        <span class='hs-varid'>x</span>
<a name="line-23495"></a><span class='hs-comment'>{-
<a name="line-23496"></a>attr {
<a name="line-23497"></a>  allowed_values {
<a name="line-23498"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-23499"></a>  }
<a name="line-23500"></a>  name: "T"
<a name="line-23501"></a>  type: "type"
<a name="line-23502"></a>}
<a name="line-23503"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-23504"></a>output_arg { name: "y" type: DT_BOOL }
<a name="line-23505"></a>-}</span>
<a name="line-23506"></a>
<a name="line-23507"></a><span class='hs-comment'>-- | </span>
<a name="line-23508"></a>
<a name="line-23509"></a><a name="tensorArrayConcat"></a><span class='hs-definition'>tensorArrayConcat</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23510"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-23511"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__</span>
<a name="line-23512"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-23513"></a>                                <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23514"></a>                     <span class='hs-comment'>-- ^ (__value__, __lengths__)</span>
<a name="line-23515"></a>                     <span class='hs-comment'>--</span>
<a name="line-23516"></a>                     <span class='hs-comment'>-- * __value__</span>
<a name="line-23517"></a>                     <span class='hs-comment'>--</span>
<a name="line-23518"></a>                     <span class='hs-comment'>-- * __lengths__</span>
<a name="line-23519"></a><span class='hs-definition'>tensorArrayConcat</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23520"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArrayConcat"</span>
<a name="line-23521"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23522"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span>
<a name="line-23523"></a><span class='hs-comment'>{-
<a name="line-23524"></a>attr { name: "dtype" type: "type" }
<a name="line-23525"></a>attr {
<a name="line-23526"></a>  default_value { shape { unknown_rank: true } }
<a name="line-23527"></a>  name: "element_shape_except0"
<a name="line-23528"></a>  type: "shape"
<a name="line-23529"></a>}
<a name="line-23530"></a>input_arg { is_ref: true name: "handle" type: DT_STRING }
<a name="line-23531"></a>input_arg { name: "flow_in" type: DT_FLOAT }
<a name="line-23532"></a>output_arg { name: "value" type_attr: "dtype" }
<a name="line-23533"></a>output_arg { name: "lengths" type: DT_INT64 }
<a name="line-23534"></a>-}</span>
<a name="line-23535"></a>
<a name="line-23536"></a><a name="sparseReduceSum"></a><span class='hs-comment'>-- | Computes the sum of elements across dimensions of a SparseTensor.</span>
<a name="line-23537"></a><span class='hs-comment'>--</span>
<a name="line-23538"></a><span class='hs-comment'>-- This Op takes a SparseTensor and is the sparse counterpart to</span>
<a name="line-23539"></a><span class='hs-comment'>-- `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`</span>
<a name="line-23540"></a><span class='hs-comment'>-- instead of a sparse one.</span>
<a name="line-23541"></a><span class='hs-comment'>-- </span>
<a name="line-23542"></a><span class='hs-comment'>-- Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless</span>
<a name="line-23543"></a><span class='hs-comment'>-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in</span>
<a name="line-23544"></a><span class='hs-comment'>-- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained</span>
<a name="line-23545"></a><span class='hs-comment'>-- with length 1.</span>
<a name="line-23546"></a><span class='hs-comment'>-- </span>
<a name="line-23547"></a><span class='hs-comment'>-- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor</span>
<a name="line-23548"></a><span class='hs-comment'>-- with a single element is returned.  Additionally, the axes can be negative,</span>
<a name="line-23549"></a><span class='hs-comment'>-- which are interpreted according to the indexing rules in Python.</span>
<a name="line-23550"></a><span class='hs-definition'>sparseReduceSum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-23551"></a>                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-23552"></a>                                                   <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-23553"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-23554"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23555"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-23556"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-23557"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-23558"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-23559"></a>                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23560"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a</span>
<a name="line-23561"></a>                                            <span class='hs-comment'>-- SparseTensor, possibly not in canonical ordering.</span>
<a name="line-23562"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.</span>
<a name="line-23563"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.</span>
<a name="line-23564"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.</span>
<a name="line-23565"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: `R-K`-D.  The reduced Tensor.</span>
<a name="line-23566"></a><span class='hs-definition'>sparseReduceSum</span> <span class='hs-varid'>input_indices</span> <span class='hs-varid'>input_values</span> <span class='hs-varid'>input_shape</span>
<a name="line-23567"></a>                <span class='hs-varid'>reduction_axes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23568"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseReduceSum"</span>
<a name="line-23569"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23570"></a>        <span class='hs-varid'>input_indices</span> <span class='hs-varid'>input_values</span> <span class='hs-varid'>input_shape</span> <span class='hs-varid'>reduction_axes</span>
<a name="line-23571"></a><span class='hs-comment'>{-
<a name="line-23572"></a>attr {
<a name="line-23573"></a>  default_value { b: false }
<a name="line-23574"></a>  description: "If true, retain reduced dimensions with length 1."
<a name="line-23575"></a>  name: "keep_dims"
<a name="line-23576"></a>  type: "bool"
<a name="line-23577"></a>}
<a name="line-23578"></a>attr {
<a name="line-23579"></a>  allowed_values {
<a name="line-23580"></a>    list {
<a name="line-23581"></a>      type: DT_FLOAT
<a name="line-23582"></a>      type: DT_DOUBLE
<a name="line-23583"></a>      type: DT_INT64
<a name="line-23584"></a>      type: DT_INT32
<a name="line-23585"></a>      type: DT_UINT8
<a name="line-23586"></a>      type: DT_UINT16
<a name="line-23587"></a>      type: DT_INT16
<a name="line-23588"></a>      type: DT_INT8
<a name="line-23589"></a>      type: DT_COMPLEX64
<a name="line-23590"></a>      type: DT_COMPLEX128
<a name="line-23591"></a>      type: DT_QINT8
<a name="line-23592"></a>      type: DT_QUINT8
<a name="line-23593"></a>      type: DT_QINT32
<a name="line-23594"></a>      type: DT_HALF
<a name="line-23595"></a>    }
<a name="line-23596"></a>  }
<a name="line-23597"></a>  name: "T"
<a name="line-23598"></a>  type: "type"
<a name="line-23599"></a>}
<a name="line-23600"></a>input_arg {
<a name="line-23601"></a>  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
<a name="line-23602"></a>  name: "input_indices"
<a name="line-23603"></a>  type: DT_INT64
<a name="line-23604"></a>}
<a name="line-23605"></a>input_arg {
<a name="line-23606"></a>  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
<a name="line-23607"></a>  name: "input_values"
<a name="line-23608"></a>  type_attr: "T"
<a name="line-23609"></a>}
<a name="line-23610"></a>input_arg {
<a name="line-23611"></a>  description: "1-D.  Shape of the input SparseTensor."
<a name="line-23612"></a>  name: "input_shape"
<a name="line-23613"></a>  type: DT_INT64
<a name="line-23614"></a>}
<a name="line-23615"></a>input_arg {
<a name="line-23616"></a>  description: "1-D.  Length-`K` vector containing the reduction axes."
<a name="line-23617"></a>  name: "reduction_axes"
<a name="line-23618"></a>  type: DT_INT32
<a name="line-23619"></a>}
<a name="line-23620"></a>output_arg {
<a name="line-23621"></a>  description: "`R-K`-D.  The reduced Tensor."
<a name="line-23622"></a>  name: "output"
<a name="line-23623"></a>  type_attr: "T"
<a name="line-23624"></a>}
<a name="line-23625"></a>-}</span>
<a name="line-23626"></a>
<a name="line-23627"></a><a name="realDiv"></a><span class='hs-comment'>-- | Returns x / y element-wise for real types.</span>
<a name="line-23628"></a><span class='hs-comment'>--</span>
<a name="line-23629"></a><span class='hs-comment'>-- If `x` and `y` are reals, this will return the floating-point division.</span>
<a name="line-23630"></a><span class='hs-comment'>-- </span>
<a name="line-23631"></a><span class='hs-comment'>-- *NOTE*: `Div` supports broadcasting. More about broadcasting</span>
<a name="line-23632"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-23633"></a><span class='hs-definition'>realDiv</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-23634"></a>                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-23635"></a>                                     <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-23636"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23637"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-23638"></a>                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-23639"></a>                                     <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-23640"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-23641"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-23642"></a><span class='hs-definition'>realDiv</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23643"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RealDiv"</span>
<a name="line-23644"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23645"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-23646"></a><span class='hs-comment'>{-
<a name="line-23647"></a>attr {
<a name="line-23648"></a>  allowed_values {
<a name="line-23649"></a>    list {
<a name="line-23650"></a>      type: DT_HALF
<a name="line-23651"></a>      type: DT_FLOAT
<a name="line-23652"></a>      type: DT_DOUBLE
<a name="line-23653"></a>      type: DT_UINT8
<a name="line-23654"></a>      type: DT_INT8
<a name="line-23655"></a>      type: DT_UINT16
<a name="line-23656"></a>      type: DT_INT16
<a name="line-23657"></a>      type: DT_INT32
<a name="line-23658"></a>      type: DT_INT64
<a name="line-23659"></a>      type: DT_COMPLEX64
<a name="line-23660"></a>      type: DT_COMPLEX128
<a name="line-23661"></a>    }
<a name="line-23662"></a>  }
<a name="line-23663"></a>  name: "T"
<a name="line-23664"></a>  type: "type"
<a name="line-23665"></a>}
<a name="line-23666"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-23667"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-23668"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-23669"></a>-}</span>
<a name="line-23670"></a>
<a name="line-23671"></a><span class='hs-comment'>-- | </span>
<a name="line-23672"></a>
<a name="line-23673"></a><a name="tensorArraySize"></a><span class='hs-definition'>tensorArraySize</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__</span>
<a name="line-23674"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __flow_in__</span>
<a name="line-23675"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __size__</span>
<a name="line-23676"></a><span class='hs-definition'>tensorArraySize</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23677"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TensorArraySize"</span><span class='hs-layout'>)</span>
<a name="line-23678"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>flow_in</span>
<a name="line-23679"></a><span class='hs-comment'>{-
<a name="line-23680"></a>input_arg { is_ref: true name: "handle" type: DT_STRING }
<a name="line-23681"></a>input_arg { name: "flow_in" type: DT_FLOAT }
<a name="line-23682"></a>output_arg { name: "size" type: DT_INT32 }
<a name="line-23683"></a>-}</span>
<a name="line-23684"></a>
<a name="line-23685"></a><a name="biasAddV1"></a><span class='hs-comment'>-- | Adds `bias` to `value`.</span>
<a name="line-23686"></a><span class='hs-comment'>--</span>
<a name="line-23687"></a><span class='hs-comment'>-- This is a deprecated version of BiasAdd and will be soon removed.</span>
<a name="line-23688"></a><span class='hs-comment'>-- </span>
<a name="line-23689"></a><span class='hs-comment'>-- This is a special case of `tf.add` where `bias` is restricted to be 1-D.</span>
<a name="line-23690"></a><span class='hs-comment'>-- Broadcasting is supported, so `value` may have any number of dimensions.</span>
<a name="line-23691"></a><span class='hs-definition'>biasAddV1</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-23692"></a>                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-23693"></a>                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-23694"></a>                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23695"></a>                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-23696"></a>                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-23697"></a>                                       <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23698"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: Any number of dimensions.</span>
<a name="line-23699"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __bias__: 1-D with size the last dimension of `value`.</span>
<a name="line-23700"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Broadcasted sum of `value` and `bias`.</span>
<a name="line-23701"></a><span class='hs-definition'>biasAddV1</span> <span class='hs-varid'>value</span> <span class='hs-varid'>bias</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23702"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BiasAddV1"</span>
<a name="line-23703"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23704"></a>        <span class='hs-varid'>value</span> <span class='hs-varid'>bias</span>
<a name="line-23705"></a><span class='hs-comment'>{-
<a name="line-23706"></a>attr {
<a name="line-23707"></a>  allowed_values {
<a name="line-23708"></a>    list {
<a name="line-23709"></a>      type: DT_FLOAT
<a name="line-23710"></a>      type: DT_DOUBLE
<a name="line-23711"></a>      type: DT_INT64
<a name="line-23712"></a>      type: DT_INT32
<a name="line-23713"></a>      type: DT_UINT8
<a name="line-23714"></a>      type: DT_UINT16
<a name="line-23715"></a>      type: DT_INT16
<a name="line-23716"></a>      type: DT_INT8
<a name="line-23717"></a>      type: DT_COMPLEX64
<a name="line-23718"></a>      type: DT_COMPLEX128
<a name="line-23719"></a>      type: DT_QINT8
<a name="line-23720"></a>      type: DT_QUINT8
<a name="line-23721"></a>      type: DT_QINT32
<a name="line-23722"></a>      type: DT_HALF
<a name="line-23723"></a>    }
<a name="line-23724"></a>  }
<a name="line-23725"></a>  name: "T"
<a name="line-23726"></a>  type: "type"
<a name="line-23727"></a>}
<a name="line-23728"></a>input_arg {
<a name="line-23729"></a>  description: "Any number of dimensions."
<a name="line-23730"></a>  name: "value"
<a name="line-23731"></a>  type_attr: "T"
<a name="line-23732"></a>}
<a name="line-23733"></a>input_arg {
<a name="line-23734"></a>  description: "1-D with size the last dimension of `value`."
<a name="line-23735"></a>  name: "bias"
<a name="line-23736"></a>  type_attr: "T"
<a name="line-23737"></a>}
<a name="line-23738"></a>output_arg {
<a name="line-23739"></a>  description: "Broadcasted sum of `value` and `bias`."
<a name="line-23740"></a>  name: "output"
<a name="line-23741"></a>  type_attr: "T"
<a name="line-23742"></a>}
<a name="line-23743"></a>-}</span>
<a name="line-23744"></a>
<a name="line-23745"></a><a name="logicalOr"></a><span class='hs-comment'>-- | Returns the truth value of x OR y element-wise.</span>
<a name="line-23746"></a><span class='hs-comment'>--</span>
<a name="line-23747"></a><span class='hs-comment'>-- *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting</span>
<a name="line-23748"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-23749"></a><span class='hs-definition'>logicalOr</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-23750"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-23751"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-23752"></a><span class='hs-definition'>logicalOr</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23753"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LogicalOr"</span><span class='hs-layout'>)</span>
<a name="line-23754"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-23755"></a><span class='hs-comment'>{-
<a name="line-23756"></a>input_arg { name: "x" type: DT_BOOL }
<a name="line-23757"></a>input_arg { name: "y" type: DT_BOOL }
<a name="line-23758"></a>output_arg { name: "z" type: DT_BOOL }
<a name="line-23759"></a>-}</span>
<a name="line-23760"></a>
<a name="line-23761"></a><span class='hs-comment'>-- | Push an element onto the stack.</span>
<a name="line-23762"></a>
<a name="line-23763"></a><a name="stackPush"></a><span class='hs-definition'>stackPush</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23764"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a stack.</span>
<a name="line-23765"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __elem__: The tensor to be pushed onto the stack.</span>
<a name="line-23766"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: The same tensor as the input 'elem'.</span>
<a name="line-23767"></a><span class='hs-definition'>stackPush</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>elem</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23768"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StackPush"</span>
<a name="line-23769"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23770"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>elem</span>
<a name="line-23771"></a><span class='hs-comment'>{-
<a name="line-23772"></a>attr { name: "T" type: "type" }
<a name="line-23773"></a>attr {
<a name="line-23774"></a>  default_value { b: false }
<a name="line-23775"></a>  description: "Swap `elem` to CPU. Default to false."
<a name="line-23776"></a>  name: "swap_memory"
<a name="line-23777"></a>  type: "bool"
<a name="line-23778"></a>}
<a name="line-23779"></a>input_arg {
<a name="line-23780"></a>  description: "The handle to a stack."
<a name="line-23781"></a>  is_ref: true
<a name="line-23782"></a>  name: "handle"
<a name="line-23783"></a>  type: DT_STRING
<a name="line-23784"></a>}
<a name="line-23785"></a>input_arg {
<a name="line-23786"></a>  description: "The tensor to be pushed onto the stack."
<a name="line-23787"></a>  name: "elem"
<a name="line-23788"></a>  type_attr: "T"
<a name="line-23789"></a>}
<a name="line-23790"></a>output_arg {
<a name="line-23791"></a>  description: "The same tensor as the input \'elem\'."
<a name="line-23792"></a>  name: "output"
<a name="line-23793"></a>  type_attr: "T"
<a name="line-23794"></a>}
<a name="line-23795"></a>-}</span>
<a name="line-23796"></a>
<a name="line-23797"></a><span class='hs-comment'>-- | Computes Quantized Rectified Linear: `max(features, 0)`</span>
<a name="line-23798"></a>
<a name="line-23799"></a><a name="quantizedRelu"></a><span class='hs-definition'>quantizedRelu</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tinput</span> <span class='hs-varid'>out_type</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-23800"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-23801"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23802"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-23803"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>,</span>
<a name="line-23804"></a>                                                    <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span>
<a name="line-23805"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-23806"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23807"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-23808"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23809"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tinput</span> <span class='hs-comment'>-- ^ __features__</span>
<a name="line-23810"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __min_features__: The float value that the lowest quantized value represents.</span>
<a name="line-23811"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __max_features__: The float value that the highest quantized value represents.</span>
<a name="line-23812"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-23813"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-23814"></a>                 <span class='hs-comment'>-- ^ (__activations__, __min_activations__, __max_activations__)</span>
<a name="line-23815"></a>                 <span class='hs-comment'>--</span>
<a name="line-23816"></a>                 <span class='hs-comment'>-- * __activations__: Has the same output shape as "features".</span>
<a name="line-23817"></a>                 <span class='hs-comment'>--</span>
<a name="line-23818"></a>                 <span class='hs-comment'>-- * __min_activations__: The float value that the lowest quantized value represents.</span>
<a name="line-23819"></a>                 <span class='hs-comment'>--</span>
<a name="line-23820"></a>                 <span class='hs-comment'>-- * __max_activations__: The float value that the highest quantized value represents.</span>
<a name="line-23821"></a><span class='hs-definition'>quantizedRelu</span> <span class='hs-varid'>features</span> <span class='hs-varid'>min_features</span> <span class='hs-varid'>max_features</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23822"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"QuantizedRelu"</span>
<a name="line-23823"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tinput"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tinput</span><span class='hs-layout'>)</span>
<a name="line-23824"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_type"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_type</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23825"></a>        <span class='hs-varid'>features</span> <span class='hs-varid'>min_features</span> <span class='hs-varid'>max_features</span>
<a name="line-23826"></a><span class='hs-comment'>{-
<a name="line-23827"></a>attr {
<a name="line-23828"></a>  allowed_values {
<a name="line-23829"></a>    list {
<a name="line-23830"></a>      type: DT_QINT8
<a name="line-23831"></a>      type: DT_QUINT8
<a name="line-23832"></a>      type: DT_QINT16
<a name="line-23833"></a>      type: DT_QUINT16
<a name="line-23834"></a>      type: DT_QINT32
<a name="line-23835"></a>    }
<a name="line-23836"></a>  }
<a name="line-23837"></a>  name: "Tinput"
<a name="line-23838"></a>  type: "type"
<a name="line-23839"></a>}
<a name="line-23840"></a>attr {
<a name="line-23841"></a>  allowed_values {
<a name="line-23842"></a>    list {
<a name="line-23843"></a>      type: DT_QINT8
<a name="line-23844"></a>      type: DT_QUINT8
<a name="line-23845"></a>      type: DT_QINT16
<a name="line-23846"></a>      type: DT_QUINT16
<a name="line-23847"></a>      type: DT_QINT32
<a name="line-23848"></a>    }
<a name="line-23849"></a>  }
<a name="line-23850"></a>  default_value { type: DT_QUINT8 }
<a name="line-23851"></a>  name: "out_type"
<a name="line-23852"></a>  type: "type"
<a name="line-23853"></a>}
<a name="line-23854"></a>input_arg { name: "features" type_attr: "Tinput" }
<a name="line-23855"></a>input_arg {
<a name="line-23856"></a>  description: "The float value that the lowest quantized value represents."
<a name="line-23857"></a>  name: "min_features"
<a name="line-23858"></a>  type: DT_FLOAT
<a name="line-23859"></a>}
<a name="line-23860"></a>input_arg {
<a name="line-23861"></a>  description: "The float value that the highest quantized value represents."
<a name="line-23862"></a>  name: "max_features"
<a name="line-23863"></a>  type: DT_FLOAT
<a name="line-23864"></a>}
<a name="line-23865"></a>output_arg {
<a name="line-23866"></a>  description: "Has the same output shape as \"features\"."
<a name="line-23867"></a>  name: "activations"
<a name="line-23868"></a>  type_attr: "out_type"
<a name="line-23869"></a>}
<a name="line-23870"></a>output_arg {
<a name="line-23871"></a>  description: "The float value that the lowest quantized value represents."
<a name="line-23872"></a>  name: "min_activations"
<a name="line-23873"></a>  type: DT_FLOAT
<a name="line-23874"></a>}
<a name="line-23875"></a>output_arg {
<a name="line-23876"></a>  description: "The float value that the highest quantized value represents."
<a name="line-23877"></a>  name: "max_activations"
<a name="line-23878"></a>  type: DT_FLOAT
<a name="line-23879"></a>}
<a name="line-23880"></a>-}</span>
<a name="line-23881"></a>
<a name="line-23882"></a><a name="broadcastGradientArgs"></a><span class='hs-comment'>-- | Return the reduction indices for computing gradients of s0 op s1 with broadcast.</span>
<a name="line-23883"></a><span class='hs-comment'>--</span>
<a name="line-23884"></a><span class='hs-comment'>-- This is typically used by gradient computations for a broadcasting operation.</span>
<a name="line-23885"></a><span class='hs-definition'>broadcastGradientArgs</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23886"></a>                                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23887"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __s0__</span>
<a name="line-23888"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __s1__</span>
<a name="line-23889"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-23890"></a>                         <span class='hs-comment'>-- ^ (__r0__, __r1__)</span>
<a name="line-23891"></a>                         <span class='hs-comment'>--</span>
<a name="line-23892"></a>                         <span class='hs-comment'>-- * __r0__</span>
<a name="line-23893"></a>                         <span class='hs-comment'>--</span>
<a name="line-23894"></a>                         <span class='hs-comment'>-- * __r1__</span>
<a name="line-23895"></a><span class='hs-definition'>broadcastGradientArgs</span> <span class='hs-varid'>s0</span> <span class='hs-varid'>s1</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23896"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BroadcastGradientArgs"</span>
<a name="line-23897"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23898"></a>        <span class='hs-varid'>s0</span> <span class='hs-varid'>s1</span>
<a name="line-23899"></a><span class='hs-comment'>{-
<a name="line-23900"></a>attr {
<a name="line-23901"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-23902"></a>  default_value { type: DT_INT32 }
<a name="line-23903"></a>  name: "T"
<a name="line-23904"></a>  type: "type"
<a name="line-23905"></a>}
<a name="line-23906"></a>input_arg { name: "s0" type_attr: "T" }
<a name="line-23907"></a>input_arg { name: "s1" type_attr: "T" }
<a name="line-23908"></a>output_arg { name: "r0" type_attr: "T" }
<a name="line-23909"></a>output_arg { name: "r1" type_attr: "T" }
<a name="line-23910"></a>-}</span>
<a name="line-23911"></a>
<a name="line-23912"></a><a name="uniqueWithCounts"></a><span class='hs-comment'>-- | Finds unique elements in a 1-D tensor.</span>
<a name="line-23913"></a><span class='hs-comment'>--</span>
<a name="line-23914"></a><span class='hs-comment'>-- This operation returns a tensor `y` containing all of the unique elements of `x`</span>
<a name="line-23915"></a><span class='hs-comment'>-- sorted in the same order that they occur in `x`. This operation also returns a</span>
<a name="line-23916"></a><span class='hs-comment'>-- tensor `idx` the same size as `x` that contains the index of each value of `x`</span>
<a name="line-23917"></a><span class='hs-comment'>-- in the unique output `y`. Finally, it returns a third tensor `count` that</span>
<a name="line-23918"></a><span class='hs-comment'>-- contains the count of each element of `y` in `x`. In other words:</span>
<a name="line-23919"></a><span class='hs-comment'>-- </span>
<a name="line-23920"></a><span class='hs-comment'>-- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`</span>
<a name="line-23921"></a><span class='hs-comment'>-- </span>
<a name="line-23922"></a><span class='hs-comment'>-- For example:</span>
<a name="line-23923"></a><span class='hs-comment'>-- </span>
<a name="line-23924"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-23925"></a><span class='hs-comment'>-- # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]</span>
<a name="line-23926"></a><span class='hs-comment'>-- y, idx, count = unique_with_counts(x)</span>
<a name="line-23927"></a><span class='hs-comment'>-- y ==&gt; [1, 2, 4, 7, 8]</span>
<a name="line-23928"></a><span class='hs-comment'>-- idx ==&gt; [0, 0, 1, 2, 2, 2, 3, 4, 4]</span>
<a name="line-23929"></a><span class='hs-comment'>-- count ==&gt; [2, 1, 3, 1, 2]</span>
<a name="line-23930"></a><span class='hs-comment'>-- ```</span>
<a name="line-23931"></a><span class='hs-definition'>uniqueWithCounts</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varid'>out_idx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>,</span>
<a name="line-23932"></a>                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23933"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23934"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__: 1-D.</span>
<a name="line-23935"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>,</span>
<a name="line-23936"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ (__y__, __idx__, __count__)</span>
<a name="line-23937"></a>                    <span class='hs-comment'>--</span>
<a name="line-23938"></a>                    <span class='hs-comment'>-- * __y__: 1-D.</span>
<a name="line-23939"></a>                    <span class='hs-comment'>--</span>
<a name="line-23940"></a>                    <span class='hs-comment'>-- * __idx__: 1-D.</span>
<a name="line-23941"></a>                    <span class='hs-comment'>--</span>
<a name="line-23942"></a>                    <span class='hs-comment'>-- * __count__: 1-D.</span>
<a name="line-23943"></a><span class='hs-definition'>uniqueWithCounts</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23944"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"UniqueWithCounts"</span>
<a name="line-23945"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-23946"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"out_idx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>out_idx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23947"></a>        <span class='hs-varid'>x</span>
<a name="line-23948"></a><span class='hs-comment'>{-
<a name="line-23949"></a>attr { name: "T" type: "type" }
<a name="line-23950"></a>attr {
<a name="line-23951"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-23952"></a>  default_value { type: DT_INT32 }
<a name="line-23953"></a>  name: "out_idx"
<a name="line-23954"></a>  type: "type"
<a name="line-23955"></a>}
<a name="line-23956"></a>input_arg { description: "1-D." name: "x" type_attr: "T" }
<a name="line-23957"></a>output_arg { description: "1-D." name: "y" type_attr: "T" }
<a name="line-23958"></a>output_arg { description: "1-D." name: "idx" type_attr: "out_idx" }
<a name="line-23959"></a>output_arg {
<a name="line-23960"></a>  description: "1-D." name: "count" type_attr: "out_idx"
<a name="line-23961"></a>}
<a name="line-23962"></a>-}</span>
<a name="line-23963"></a>
<a name="line-23964"></a><a name="truncateMod"></a><span class='hs-comment'>-- | Returns element-wise remainder of division. This emulates C semantics where</span>
<a name="line-23965"></a><span class='hs-comment'>--</span>
<a name="line-23966"></a><span class='hs-comment'>-- true, this follows C semantics in that the result here is consistent</span>
<a name="line-23967"></a><span class='hs-comment'>-- with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.</span>
<a name="line-23968"></a><span class='hs-comment'>-- </span>
<a name="line-23969"></a><span class='hs-comment'>-- *NOTE*: `Mod` supports broadcasting. More about broadcasting</span>
<a name="line-23970"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-23971"></a><span class='hs-definition'>truncateMod</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-23972"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-23973"></a>                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-23974"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-23975"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-23976"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-23977"></a><span class='hs-definition'>truncateMod</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-23978"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TruncateMod"</span>
<a name="line-23979"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-23980"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-23981"></a><span class='hs-comment'>{-
<a name="line-23982"></a>attr {
<a name="line-23983"></a>  allowed_values {
<a name="line-23984"></a>    list {
<a name="line-23985"></a>      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
<a name="line-23986"></a>    }
<a name="line-23987"></a>  }
<a name="line-23988"></a>  name: "T"
<a name="line-23989"></a>  type: "type"
<a name="line-23990"></a>}
<a name="line-23991"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-23992"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-23993"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-23994"></a>-}</span>
<a name="line-23995"></a>
<a name="line-23996"></a><a name="stridedSliceGrad"></a><span class='hs-comment'>-- | Returns the gradient of `StridedSlice`.</span>
<a name="line-23997"></a><span class='hs-comment'>--</span>
<a name="line-23998"></a><span class='hs-comment'>-- Since `StridedSlice` cuts out pieces of its `input` which is size</span>
<a name="line-23999"></a><span class='hs-comment'>-- `shape`, its gradient will have the same shape (which is passed here</span>
<a name="line-24000"></a><span class='hs-comment'>-- as `shape`). The gradient will be zero in any element that the slice</span>
<a name="line-24001"></a><span class='hs-comment'>-- does not select.</span>
<a name="line-24002"></a><span class='hs-comment'>-- </span>
<a name="line-24003"></a><span class='hs-comment'>-- Arguments are the same as StridedSliceGrad with the exception that</span>
<a name="line-24004"></a><span class='hs-comment'>-- `dy` is the input gradient to be propagated and `shape` is the</span>
<a name="line-24005"></a><span class='hs-comment'>-- shape of `StridedSlice`'s `input`.</span>
<a name="line-24006"></a><span class='hs-definition'>stridedSliceGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-varid'>index</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-24007"></a>                                                     <span class='hs-conid'>TensorType</span> <span class='hs-varid'>index</span><span class='hs-layout'>,</span>
<a name="line-24008"></a>                                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-24009"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>index</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-24010"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __shape__</span>
<a name="line-24011"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __begin__</span>
<a name="line-24012"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __end__</span>
<a name="line-24013"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>index</span> <span class='hs-comment'>-- ^ __strides__</span>
<a name="line-24014"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __dy__</span>
<a name="line-24015"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-24016"></a><span class='hs-definition'>stridedSliceGrad</span> <span class='hs-varid'>shape</span> <span class='hs-varid'>begin</span> <span class='hs-varid'>end</span> <span class='hs-varid'>strides</span> <span class='hs-varid'>dy</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24017"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"StridedSliceGrad"</span>
<a name="line-24018"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-24019"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Index"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>index</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-24020"></a>        <span class='hs-varid'>shape</span> <span class='hs-varid'>begin</span> <span class='hs-varid'>end</span> <span class='hs-varid'>strides</span> <span class='hs-varid'>dy</span>
<a name="line-24021"></a><span class='hs-comment'>{-
<a name="line-24022"></a>attr { name: "T" type: "type" }
<a name="line-24023"></a>attr {
<a name="line-24024"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-24025"></a>  name: "Index"
<a name="line-24026"></a>  type: "type"
<a name="line-24027"></a>}
<a name="line-24028"></a>attr { default_value { i: 0 } name: "begin_mask" type: "int" }
<a name="line-24029"></a>attr { default_value { i: 0 } name: "end_mask" type: "int" }
<a name="line-24030"></a>attr { default_value { i: 0 } name: "ellipsis_mask" type: "int" }
<a name="line-24031"></a>attr { default_value { i: 0 } name: "new_axis_mask" type: "int" }
<a name="line-24032"></a>attr {
<a name="line-24033"></a>  default_value { i: 0 } name: "shrink_axis_mask" type: "int"
<a name="line-24034"></a>}
<a name="line-24035"></a>input_arg { name: "shape" type_attr: "Index" }
<a name="line-24036"></a>input_arg { name: "begin" type_attr: "Index" }
<a name="line-24037"></a>input_arg { name: "end" type_attr: "Index" }
<a name="line-24038"></a>input_arg { name: "strides" type_attr: "Index" }
<a name="line-24039"></a>input_arg { name: "dy" type_attr: "T" }
<a name="line-24040"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-24041"></a>-}</span>
<a name="line-24042"></a>
<a name="line-24043"></a><a name="fractionalAvgPool"></a><span class='hs-comment'>-- | Performs fractional average pooling on the input.</span>
<a name="line-24044"></a><span class='hs-comment'>--</span>
<a name="line-24045"></a><span class='hs-comment'>-- Fractional average pooling is similar to Fractional max pooling in the pooling</span>
<a name="line-24046"></a><span class='hs-comment'>-- region generation step. The only difference is that after pooling regions are</span>
<a name="line-24047"></a><span class='hs-comment'>-- generated, a mean operation is performed instead of a max operation in each</span>
<a name="line-24048"></a><span class='hs-comment'>-- pooling region.</span>
<a name="line-24049"></a><span class='hs-definition'>fractionalAvgPool</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-24050"></a>                                                          <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-24051"></a>                                                          <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-24052"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __value__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-24053"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-24054"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span>
<a name="line-24055"></a>                     <span class='hs-comment'>-- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__)</span>
<a name="line-24056"></a>                     <span class='hs-comment'>--</span>
<a name="line-24057"></a>                     <span class='hs-comment'>-- * __output__: output tensor after fractional avg pooling.</span>
<a name="line-24058"></a>                     <span class='hs-comment'>--</span>
<a name="line-24059"></a>                     <span class='hs-comment'>-- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient.</span>
<a name="line-24060"></a>                     <span class='hs-comment'>--</span>
<a name="line-24061"></a>                     <span class='hs-comment'>-- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient.</span>
<a name="line-24062"></a><span class='hs-definition'>fractionalAvgPool</span> <span class='hs-varid'>value</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24063"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FractionalAvgPool"</span>
<a name="line-24064"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-24065"></a>        <span class='hs-varid'>value</span>
<a name="line-24066"></a><span class='hs-comment'>{-
<a name="line-24067"></a>attr {
<a name="line-24068"></a>  description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be &gt;= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively."
<a name="line-24069"></a>  has_minimum: true
<a name="line-24070"></a>  minimum: 4
<a name="line-24071"></a>  name: "pooling_ratio"
<a name="line-24072"></a>  type: "list(float)"
<a name="line-24073"></a>}
<a name="line-24074"></a>attr {
<a name="line-24075"></a>  default_value { b: false }
<a name="line-24076"></a>  description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](<a href="http://arxiv.org/abs/1412.6071)">http://arxiv.org/abs/1412.6071)</a> for\ndifference between pseudorandom and random."
<a name="line-24077"></a>  name: "pseudo_random"
<a name="line-24078"></a>  type: "bool"
<a name="line-24079"></a>}
<a name="line-24080"></a>attr {
<a name="line-24081"></a>  default_value { b: false }
<a name="line-24082"></a>  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling."
<a name="line-24083"></a>  name: "overlapping"
<a name="line-24084"></a>  type: "bool"
<a name="line-24085"></a>}
<a name="line-24086"></a>attr {
<a name="line-24087"></a>  default_value { b: false }
<a name="line-24088"></a>  description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalAvgPool node in the computation graph. Mainly used\nin unit test to make FractionalAvgPool deterministic."
<a name="line-24089"></a>  name: "deterministic"
<a name="line-24090"></a>  type: "bool"
<a name="line-24091"></a>}
<a name="line-24092"></a>attr {
<a name="line-24093"></a>  default_value { i: 0 }
<a name="line-24094"></a>  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-24095"></a>  name: "seed"
<a name="line-24096"></a>  type: "int"
<a name="line-24097"></a>}
<a name="line-24098"></a>attr {
<a name="line-24099"></a>  default_value { i: 0 }
<a name="line-24100"></a>  description: "An second seed to avoid seed collision."
<a name="line-24101"></a>  name: "seed2"
<a name="line-24102"></a>  type: "int"
<a name="line-24103"></a>}
<a name="line-24104"></a>attr {
<a name="line-24105"></a>  allowed_values {
<a name="line-24106"></a>    list {
<a name="line-24107"></a>      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
<a name="line-24108"></a>    }
<a name="line-24109"></a>  }
<a name="line-24110"></a>  name: "T"
<a name="line-24111"></a>  type: "type"
<a name="line-24112"></a>}
<a name="line-24113"></a>input_arg {
<a name="line-24114"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-24115"></a>  name: "value"
<a name="line-24116"></a>  type_attr: "T"
<a name="line-24117"></a>}
<a name="line-24118"></a>output_arg {
<a name="line-24119"></a>  description: "output tensor after fractional avg pooling."
<a name="line-24120"></a>  name: "output"
<a name="line-24121"></a>  type_attr: "T"
<a name="line-24122"></a>}
<a name="line-24123"></a>output_arg {
<a name="line-24124"></a>  description: "row pooling sequence, needed to calculate gradient."
<a name="line-24125"></a>  name: "row_pooling_sequence"
<a name="line-24126"></a>  type: DT_INT64
<a name="line-24127"></a>}
<a name="line-24128"></a>output_arg {
<a name="line-24129"></a>  description: "column pooling sequence, needed to calculate gradient."
<a name="line-24130"></a>  name: "col_pooling_sequence"
<a name="line-24131"></a>  type: DT_INT64
<a name="line-24132"></a>}
<a name="line-24133"></a>-}</span>
<a name="line-24134"></a>
<a name="line-24135"></a><a name="sparseAccumulatorTakeGradient"></a><span class='hs-comment'>-- | Extracts the average sparse gradient in the given SparseConditionalAccumulator,</span>
<a name="line-24136"></a><span class='hs-comment'>--</span>
<a name="line-24137"></a><span class='hs-comment'>-- provided that sufficient (i.e., more than num_required) gradients have been</span>
<a name="line-24138"></a><span class='hs-comment'>-- accumulated. The op will blocks until sufficient gradients have been</span>
<a name="line-24139"></a><span class='hs-comment'>-- accumulated. If the accumulator has already aggregated more than num_required</span>
<a name="line-24140"></a><span class='hs-comment'>-- gradients, it will return its average of the accumulated gradients.</span>
<a name="line-24141"></a><span class='hs-comment'>-- Also automatically increments the recorded global_step in the accumulator by 1,</span>
<a name="line-24142"></a><span class='hs-comment'>-- and resets the aggregate to 0.</span>
<a name="line-24143"></a><span class='hs-definition'>sparseAccumulatorTakeGradient</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-24144"></a>                                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-24145"></a>                                                            <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-24146"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-24147"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-24148"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-24149"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-24150"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-24151"></a>                                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-24152"></a>                                                            <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-24153"></a>                                                            <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-24154"></a>                                 <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to a SparseConditionalAccumulator.</span>
<a name="line-24155"></a>                                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __num_required__: Number of gradients required before we return an aggregate.</span>
<a name="line-24156"></a>                                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-24157"></a>                                            <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-24158"></a>                                            <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-24159"></a>                                 <span class='hs-comment'>-- ^ (__indices__, __values__, __shape__)</span>
<a name="line-24160"></a>                                 <span class='hs-comment'>--</span>
<a name="line-24161"></a>                                 <span class='hs-comment'>-- * __indices__: Indices of the average of the accumulated sparse gradients.</span>
<a name="line-24162"></a>                                 <span class='hs-comment'>--</span>
<a name="line-24163"></a>                                 <span class='hs-comment'>-- * __values__: Values of the average of the accumulated sparse gradients.</span>
<a name="line-24164"></a>                                 <span class='hs-comment'>--</span>
<a name="line-24165"></a>                                 <span class='hs-comment'>-- * __shape__: Shape of the average of the accumulated sparse gradients.</span>
<a name="line-24166"></a><span class='hs-definition'>sparseAccumulatorTakeGradient</span> <span class='hs-varid'>handle</span> <span class='hs-varid'>num_required</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24167"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseAccumulatorTakeGradient"</span>
<a name="line-24168"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-24169"></a>        <span class='hs-varid'>handle</span> <span class='hs-varid'>num_required</span>
<a name="line-24170"></a><span class='hs-comment'>{-
<a name="line-24171"></a>attr {
<a name="line-24172"></a>  allowed_values {
<a name="line-24173"></a>    list {
<a name="line-24174"></a>      type: DT_FLOAT
<a name="line-24175"></a>      type: DT_DOUBLE
<a name="line-24176"></a>      type: DT_INT64
<a name="line-24177"></a>      type: DT_INT32
<a name="line-24178"></a>      type: DT_UINT8
<a name="line-24179"></a>      type: DT_UINT16
<a name="line-24180"></a>      type: DT_INT16
<a name="line-24181"></a>      type: DT_INT8
<a name="line-24182"></a>      type: DT_COMPLEX64
<a name="line-24183"></a>      type: DT_COMPLEX128
<a name="line-24184"></a>      type: DT_QINT8
<a name="line-24185"></a>      type: DT_QUINT8
<a name="line-24186"></a>      type: DT_QINT32
<a name="line-24187"></a>      type: DT_HALF
<a name="line-24188"></a>    }
<a name="line-24189"></a>  }
<a name="line-24190"></a>  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
<a name="line-24191"></a>  name: "dtype"
<a name="line-24192"></a>  type: "type"
<a name="line-24193"></a>}
<a name="line-24194"></a>input_arg {
<a name="line-24195"></a>  description: "The handle to a SparseConditionalAccumulator."
<a name="line-24196"></a>  is_ref: true
<a name="line-24197"></a>  name: "handle"
<a name="line-24198"></a>  type: DT_STRING
<a name="line-24199"></a>}
<a name="line-24200"></a>input_arg {
<a name="line-24201"></a>  description: "Number of gradients required before we return an aggregate."
<a name="line-24202"></a>  name: "num_required"
<a name="line-24203"></a>  type: DT_INT32
<a name="line-24204"></a>}
<a name="line-24205"></a>output_arg {
<a name="line-24206"></a>  description: "Indices of the average of the accumulated sparse gradients."
<a name="line-24207"></a>  name: "indices"
<a name="line-24208"></a>  type: DT_INT64
<a name="line-24209"></a>}
<a name="line-24210"></a>output_arg {
<a name="line-24211"></a>  description: "Values of the average of the accumulated sparse gradients."
<a name="line-24212"></a>  name: "values"
<a name="line-24213"></a>  type_attr: "dtype"
<a name="line-24214"></a>}
<a name="line-24215"></a>output_arg {
<a name="line-24216"></a>  description: "Shape of the average of the accumulated sparse gradients."
<a name="line-24217"></a>  name: "shape"
<a name="line-24218"></a>  type: DT_INT64
<a name="line-24219"></a>}
<a name="line-24220"></a>-}</span>
<a name="line-24221"></a>
<a name="line-24222"></a><a name="decodeJSONExample"></a><span class='hs-comment'>-- | Convert JSON-encoded Example records to binary protocol buffer strings.</span>
<a name="line-24223"></a><span class='hs-comment'>--</span>
<a name="line-24224"></a><span class='hs-comment'>-- This op translates a tensor containing Example records, encoded using</span>
<a name="line-24225"></a><span class='hs-comment'>-- the [standard JSON</span>
<a name="line-24226"></a><span class='hs-comment'>-- mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),</span>
<a name="line-24227"></a><span class='hs-comment'>-- into a tensor containing the same records encoded as binary protocol</span>
<a name="line-24228"></a><span class='hs-comment'>-- buffers. The resulting tensor can then be fed to any of the other</span>
<a name="line-24229"></a><span class='hs-comment'>-- Example-parsing ops.</span>
<a name="line-24230"></a><span class='hs-definition'>decodeJSONExample</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __json_examples__: Each string is a JSON object serialized according to the JSON</span>
<a name="line-24231"></a>                                                          <span class='hs-comment'>-- mapping of the Example proto.</span>
<a name="line-24232"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __binary_examples__: Each string is a binary Example protocol buffer corresponding</span>
<a name="line-24233"></a>                     <span class='hs-comment'>-- to the respective element of `json_examples`.</span>
<a name="line-24234"></a><span class='hs-definition'>decodeJSONExample</span> <span class='hs-varid'>json_examples</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24235"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DecodeJSONExample"</span><span class='hs-layout'>)</span>
<a name="line-24236"></a>        <span class='hs-varid'>json_examples</span>
<a name="line-24237"></a><span class='hs-comment'>{-
<a name="line-24238"></a>input_arg {
<a name="line-24239"></a>  description: "Each string is a JSON object serialized according to the JSON\nmapping of the Example proto."
<a name="line-24240"></a>  name: "json_examples"
<a name="line-24241"></a>  type: DT_STRING
<a name="line-24242"></a>}
<a name="line-24243"></a>output_arg {
<a name="line-24244"></a>  description: "Each string is a binary Example protocol buffer corresponding\nto the respective element of `json_examples`."
<a name="line-24245"></a>  name: "binary_examples"
<a name="line-24246"></a>  type: DT_STRING
<a name="line-24247"></a>}
<a name="line-24248"></a>-}</span>
<a name="line-24249"></a>
<a name="line-24250"></a><span class='hs-comment'>-- | A placeholder op that passes though `input` when its output is not fed.</span>
<a name="line-24251"></a>
<a name="line-24252"></a><a name="placeholderWithDefault"></a><span class='hs-definition'>placeholderWithDefault</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>dtype</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-24253"></a>                          <span class='hs-conid'>Shape</span> <span class='hs-comment'>-- ^ __shape__: The (possibly partial) shape of the tensor.</span>
<a name="line-24254"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __input__: The default value to produce when `output` is not fed.</span>
<a name="line-24255"></a>                          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span> <span class='hs-comment'>-- ^ __output__: A placeholder tensor that defaults to `input` if it is not fed.</span>
<a name="line-24256"></a><span class='hs-definition'>placeholderWithDefault</span> <span class='hs-varid'>shape</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24257"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"PlaceholderWithDefault"</span>
<a name="line-24258"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-24259"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"shape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>shape</span><span class='hs-layout'>)</span>
<a name="line-24260"></a>        <span class='hs-varid'>input</span>
<a name="line-24261"></a><span class='hs-comment'>{-
<a name="line-24262"></a>attr {
<a name="line-24263"></a>  description: "The type of elements in the tensor."
<a name="line-24264"></a>  name: "dtype"
<a name="line-24265"></a>  type: "type"
<a name="line-24266"></a>}
<a name="line-24267"></a>attr {
<a name="line-24268"></a>  description: "The (possibly partial) shape of the tensor."
<a name="line-24269"></a>  name: "shape"
<a name="line-24270"></a>  type: "shape"
<a name="line-24271"></a>}
<a name="line-24272"></a>input_arg {
<a name="line-24273"></a>  description: "The default value to produce when `output` is not fed."
<a name="line-24274"></a>  name: "input"
<a name="line-24275"></a>  type_attr: "dtype"
<a name="line-24276"></a>}
<a name="line-24277"></a>output_arg {
<a name="line-24278"></a>  description: "A placeholder tensor that defaults to `input` if it is not fed."
<a name="line-24279"></a>  name: "output"
<a name="line-24280"></a>  type_attr: "dtype"
<a name="line-24281"></a>}
<a name="line-24282"></a>-}</span>
<a name="line-24283"></a>
<a name="line-24284"></a><a name="applyFtrl"></a><span class='hs-comment'>-- | Update '*var' according to the Ftrl-proximal scheme.</span>
<a name="line-24285"></a><span class='hs-comment'>--</span>
<a name="line-24286"></a><span class='hs-comment'>-- accum_new = accum + grad * grad</span>
<a name="line-24287"></a><span class='hs-comment'>-- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var</span>
<a name="line-24288"></a><span class='hs-comment'>-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2</span>
<a name="line-24289"></a><span class='hs-comment'>-- var = (sign(linear) * l1 - linear) / quadratic if |linear| &gt; l1 else 0.0</span>
<a name="line-24290"></a><span class='hs-comment'>-- accum = accum_new</span>
<a name="line-24291"></a><span class='hs-definition'>applyFtrl</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-24292"></a>                                        <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-24293"></a>                                                <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-24294"></a>                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-24295"></a>                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-24296"></a>                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-24297"></a>                                                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-24298"></a>                                                <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-24299"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __var__: Should be from a Variable().</span>
<a name="line-24300"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __accum__: Should be from a Variable().</span>
<a name="line-24301"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __linear__: Should be from a Variable().</span>
<a name="line-24302"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: The gradient.</span>
<a name="line-24303"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr__: Scaling factor. Must be a scalar.</span>
<a name="line-24304"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l1__: L1 regulariation. Must be a scalar.</span>
<a name="line-24305"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __l2__: L2 regulariation. Must be a scalar.</span>
<a name="line-24306"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __lr_power__: Scaling factor. Must be a scalar.</span>
<a name="line-24307"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __out__: Same as "var".</span>
<a name="line-24308"></a><span class='hs-definition'>applyFtrl</span> <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>linear</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>lr_power</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24309"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ApplyFtrl"</span>
<a name="line-24310"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-24311"></a>        <span class='hs-varid'>var</span> <span class='hs-varid'>accum</span> <span class='hs-varid'>linear</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>lr</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>lr_power</span>
<a name="line-24312"></a><span class='hs-comment'>{-
<a name="line-24313"></a>attr {
<a name="line-24314"></a>  allowed_values {
<a name="line-24315"></a>    list {
<a name="line-24316"></a>      type: DT_FLOAT
<a name="line-24317"></a>      type: DT_DOUBLE
<a name="line-24318"></a>      type: DT_INT64
<a name="line-24319"></a>      type: DT_INT32
<a name="line-24320"></a>      type: DT_UINT8
<a name="line-24321"></a>      type: DT_UINT16
<a name="line-24322"></a>      type: DT_INT16
<a name="line-24323"></a>      type: DT_INT8
<a name="line-24324"></a>      type: DT_COMPLEX64
<a name="line-24325"></a>      type: DT_COMPLEX128
<a name="line-24326"></a>      type: DT_QINT8
<a name="line-24327"></a>      type: DT_QUINT8
<a name="line-24328"></a>      type: DT_QINT32
<a name="line-24329"></a>      type: DT_HALF
<a name="line-24330"></a>    }
<a name="line-24331"></a>  }
<a name="line-24332"></a>  name: "T"
<a name="line-24333"></a>  type: "type"
<a name="line-24334"></a>}
<a name="line-24335"></a>attr {
<a name="line-24336"></a>  default_value { b: false }
<a name="line-24337"></a>  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
<a name="line-24338"></a>  name: "use_locking"
<a name="line-24339"></a>  type: "bool"
<a name="line-24340"></a>}
<a name="line-24341"></a>input_arg {
<a name="line-24342"></a>  description: "Should be from a Variable()."
<a name="line-24343"></a>  is_ref: true
<a name="line-24344"></a>  name: "var"
<a name="line-24345"></a>  type_attr: "T"
<a name="line-24346"></a>}
<a name="line-24347"></a>input_arg {
<a name="line-24348"></a>  description: "Should be from a Variable()."
<a name="line-24349"></a>  is_ref: true
<a name="line-24350"></a>  name: "accum"
<a name="line-24351"></a>  type_attr: "T"
<a name="line-24352"></a>}
<a name="line-24353"></a>input_arg {
<a name="line-24354"></a>  description: "Should be from a Variable()."
<a name="line-24355"></a>  is_ref: true
<a name="line-24356"></a>  name: "linear"
<a name="line-24357"></a>  type_attr: "T"
<a name="line-24358"></a>}
<a name="line-24359"></a>input_arg {
<a name="line-24360"></a>  description: "The gradient." name: "grad" type_attr: "T"
<a name="line-24361"></a>}
<a name="line-24362"></a>input_arg {
<a name="line-24363"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-24364"></a>  name: "lr"
<a name="line-24365"></a>  type_attr: "T"
<a name="line-24366"></a>}
<a name="line-24367"></a>input_arg {
<a name="line-24368"></a>  description: "L1 regulariation. Must be a scalar."
<a name="line-24369"></a>  name: "l1"
<a name="line-24370"></a>  type_attr: "T"
<a name="line-24371"></a>}
<a name="line-24372"></a>input_arg {
<a name="line-24373"></a>  description: "L2 regulariation. Must be a scalar."
<a name="line-24374"></a>  name: "l2"
<a name="line-24375"></a>  type_attr: "T"
<a name="line-24376"></a>}
<a name="line-24377"></a>input_arg {
<a name="line-24378"></a>  description: "Scaling factor. Must be a scalar."
<a name="line-24379"></a>  name: "lr_power"
<a name="line-24380"></a>  type_attr: "T"
<a name="line-24381"></a>}
<a name="line-24382"></a>output_arg {
<a name="line-24383"></a>  description: "Same as \"var\"."
<a name="line-24384"></a>  is_ref: true
<a name="line-24385"></a>  name: "out"
<a name="line-24386"></a>  type_attr: "T"
<a name="line-24387"></a>}
<a name="line-24388"></a>-}</span>
<a name="line-24389"></a>
<a name="line-24390"></a><span class='hs-comment'>-- | Applies L1 regularization shrink step on the parameters.</span>
<a name="line-24391"></a>
<a name="line-24392"></a><a name="sdcaShrinkL1"></a><span class='hs-definition'>sdcaShrinkL1</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __l1__: Symmetric l1 regularization strength.</span>
<a name="line-24393"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __l2__: Symmetric l2 regularization strength. Should be a positive float.</span>
<a name="line-24394"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __weights__: a list of vectors where each value is the weight associated with a</span>
<a name="line-24395"></a>                                      <span class='hs-comment'>-- feature group.</span>
<a name="line-24396"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-24397"></a><span class='hs-definition'>sdcaShrinkL1</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span>
<a name="line-24398"></a>             <span class='hs-varid'>weights</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"num_features"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"weights"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>weights</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24399"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SdcaShrinkL1"</span>
<a name="line-24400"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"l1"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>l1</span>
<a name="line-24401"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"l2"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>l2</span>
<a name="line-24402"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_features"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_features</span><span class='hs-layout'>)</span>
<a name="line-24403"></a>        <span class='hs-varid'>weights</span>
<a name="line-24404"></a>  <span class='hs-keyword'>where</span>
<a name="line-24405"></a>    <span class='hs-varid'>num_features</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>weights</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-24406"></a><span class='hs-comment'>{-
<a name="line-24407"></a>attr {
<a name="line-24408"></a>  description: "Number of feature groups to apply shrinking step."
<a name="line-24409"></a>  has_minimum: true
<a name="line-24410"></a>  name: "num_features"
<a name="line-24411"></a>  type: "int"
<a name="line-24412"></a>}
<a name="line-24413"></a>attr {
<a name="line-24414"></a>  description: "Symmetric l1 regularization strength."
<a name="line-24415"></a>  name: "l1"
<a name="line-24416"></a>  type: "float"
<a name="line-24417"></a>}
<a name="line-24418"></a>attr {
<a name="line-24419"></a>  description: "Symmetric l2 regularization strength. Should be a positive float."
<a name="line-24420"></a>  name: "l2"
<a name="line-24421"></a>  type: "float"
<a name="line-24422"></a>}
<a name="line-24423"></a>input_arg {
<a name="line-24424"></a>  description: "a list of vectors where each value is the weight associated with a\nfeature group."
<a name="line-24425"></a>  is_ref: true
<a name="line-24426"></a>  name: "weights"
<a name="line-24427"></a>  number_attr: "num_features"
<a name="line-24428"></a>  type: DT_FLOAT
<a name="line-24429"></a>}
<a name="line-24430"></a>-}</span>
<a name="line-24431"></a>
<a name="line-24432"></a><a name="shardedFilename"></a><span class='hs-comment'>-- | Generate a sharded filename. The filename is printf formatted as</span>
<a name="line-24433"></a><span class='hs-comment'>--</span>
<a name="line-24434"></a><span class='hs-comment'>--    %s-%05d-of-%05d, basename, shard, num_shards.</span>
<a name="line-24435"></a><span class='hs-definition'>shardedFilename</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __basename__</span>
<a name="line-24436"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __shard__</span>
<a name="line-24437"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __num_shards__</span>
<a name="line-24438"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __filename__</span>
<a name="line-24439"></a><span class='hs-definition'>shardedFilename</span> <span class='hs-varid'>basename</span> <span class='hs-varid'>shard</span> <span class='hs-varid'>num_shards</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24440"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ShardedFilename"</span><span class='hs-layout'>)</span>
<a name="line-24441"></a>        <span class='hs-varid'>basename</span> <span class='hs-varid'>shard</span> <span class='hs-varid'>num_shards</span>
<a name="line-24442"></a><span class='hs-comment'>{-
<a name="line-24443"></a>input_arg { name: "basename" type: DT_STRING }
<a name="line-24444"></a>input_arg { name: "shard" type: DT_INT32 }
<a name="line-24445"></a>input_arg { name: "num_shards" type: DT_INT32 }
<a name="line-24446"></a>output_arg { name: "filename" type: DT_STRING }
<a name="line-24447"></a>-}</span>
<a name="line-24448"></a>
<a name="line-24449"></a><a name="fakeQuantWithMinMaxArgs"></a><span class='hs-comment'>-- | Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.</span>
<a name="line-24450"></a><span class='hs-comment'>--</span>
<a name="line-24451"></a><span class='hs-comment'>-- Attributes [min; max] define the clamping range for the 'inputs' data.  Op</span>
<a name="line-24452"></a><span class='hs-comment'>-- divides this range into 255 steps (total of 256 values), then replaces each</span>
<a name="line-24453"></a><span class='hs-comment'>-- 'inputs' value with the closest of the quantized step values.</span>
<a name="line-24454"></a><span class='hs-comment'>-- </span>
<a name="line-24455"></a><span class='hs-comment'>-- Quantization is called fake since the output is still in floating point.</span>
<a name="line-24456"></a><span class='hs-definition'>fakeQuantWithMinMaxArgs</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __inputs__</span>
<a name="line-24457"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __outputs__</span>
<a name="line-24458"></a><span class='hs-definition'>fakeQuantWithMinMaxArgs</span> <span class='hs-varid'>inputs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24459"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FakeQuantWithMinMaxArgs"</span><span class='hs-layout'>)</span>
<a name="line-24460"></a>        <span class='hs-varid'>inputs</span>
<a name="line-24461"></a><span class='hs-comment'>{-
<a name="line-24462"></a>attr { default_value { f: -6.0 } name: "min" type: "float" }
<a name="line-24463"></a>attr { default_value { f: 6.0 } name: "max" type: "float" }
<a name="line-24464"></a>input_arg { name: "inputs" type: DT_FLOAT }
<a name="line-24465"></a>output_arg { name: "outputs" type: DT_FLOAT }
<a name="line-24466"></a>-}</span>
<a name="line-24467"></a>
<a name="line-24468"></a><a name="scatterNdAdd"></a><span class='hs-comment'>-- | Applies sparse addition between `updates` and individual values or slices</span>
<a name="line-24469"></a><span class='hs-comment'>--</span>
<a name="line-24470"></a><span class='hs-comment'>-- within a given variable according to `indices`.</span>
<a name="line-24471"></a><span class='hs-comment'>-- </span>
<a name="line-24472"></a><span class='hs-comment'>-- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.</span>
<a name="line-24473"></a><span class='hs-comment'>-- </span>
<a name="line-24474"></a><span class='hs-comment'>-- `indices` must be integer tensor, containing indices into `ref`.</span>
<a name="line-24475"></a><span class='hs-comment'>-- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 &lt; K &lt;= P`.</span>
<a name="line-24476"></a><span class='hs-comment'>-- </span>
<a name="line-24477"></a><span class='hs-comment'>-- The innermost dimension of `indices` (with length `K`) corresponds to</span>
<a name="line-24478"></a><span class='hs-comment'>-- indices into elements (if `K = P`) or slices (if `K &lt; P`) along the `K`th</span>
<a name="line-24479"></a><span class='hs-comment'>-- dimension of `ref`.</span>
<a name="line-24480"></a><span class='hs-comment'>-- </span>
<a name="line-24481"></a><span class='hs-comment'>-- `updates` is `Tensor` of rank `Q-1+P-K` with shape:</span>
<a name="line-24482"></a><span class='hs-comment'>-- </span>
<a name="line-24483"></a><span class='hs-comment'>-- ```</span>
<a name="line-24484"></a><span class='hs-comment'>-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].</span>
<a name="line-24485"></a><span class='hs-comment'>-- ```</span>
<a name="line-24486"></a><span class='hs-comment'>-- </span>
<a name="line-24487"></a><span class='hs-comment'>-- For example, say we want to add 4 scattered elements to a rank-1 tensor to 8</span>
<a name="line-24488"></a><span class='hs-comment'>-- elements. In Python, that addition would look like this:</span>
<a name="line-24489"></a><span class='hs-comment'>-- </span>
<a name="line-24490"></a><span class='hs-comment'>--     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])</span>
<a name="line-24491"></a><span class='hs-comment'>--     indices = tf.constant([[4], [3], [1], [7]])</span>
<a name="line-24492"></a><span class='hs-comment'>--     updates = tf.constant([9, 10, 11, 12])</span>
<a name="line-24493"></a><span class='hs-comment'>--     add = tf.scatter_nd_add(ref, indices, updates)</span>
<a name="line-24494"></a><span class='hs-comment'>--     with tf.Session() as sess:</span>
<a name="line-24495"></a><span class='hs-comment'>--       print sess.run(add)</span>
<a name="line-24496"></a><span class='hs-comment'>-- </span>
<a name="line-24497"></a><span class='hs-comment'>-- The resulting update to ref would look like this:</span>
<a name="line-24498"></a><span class='hs-comment'>-- </span>
<a name="line-24499"></a><span class='hs-comment'>--     [1, 13, 3, 14, 14, 6, 7, 20]</span>
<a name="line-24500"></a><span class='hs-comment'>-- </span>
<a name="line-24501"></a><span class='hs-comment'>-- See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to</span>
<a name="line-24502"></a><span class='hs-comment'>-- slices.</span>
<a name="line-24503"></a><span class='hs-definition'>scatterNdAdd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-24504"></a>                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-24505"></a>                                                   <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-24506"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-24507"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-24508"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-24509"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-24510"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-24511"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-24512"></a>                                                   <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-24513"></a>                                           <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-24514"></a>                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-24515"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-24516"></a>                <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: A mutable Tensor. Should be from a Variable node.</span>
<a name="line-24517"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.</span>
<a name="line-24518"></a>                                      <span class='hs-comment'>-- A tensor of indices into ref.</span>
<a name="line-24519"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values</span>
<a name="line-24520"></a>                               <span class='hs-comment'>-- to add to ref.</span>
<a name="line-24521"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want</span>
<a name="line-24522"></a>                <span class='hs-comment'>-- to use the updated values after the update is done.</span>
<a name="line-24523"></a><span class='hs-definition'>scatterNdAdd</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24524"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ScatterNdAdd"</span>
<a name="line-24525"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-24526"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-24527"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span>
<a name="line-24528"></a><span class='hs-comment'>{-
<a name="line-24529"></a>attr {
<a name="line-24530"></a>  allowed_values {
<a name="line-24531"></a>    list {
<a name="line-24532"></a>      type: DT_FLOAT
<a name="line-24533"></a>      type: DT_DOUBLE
<a name="line-24534"></a>      type: DT_INT64
<a name="line-24535"></a>      type: DT_INT32
<a name="line-24536"></a>      type: DT_UINT8
<a name="line-24537"></a>      type: DT_UINT16
<a name="line-24538"></a>      type: DT_INT16
<a name="line-24539"></a>      type: DT_INT8
<a name="line-24540"></a>      type: DT_COMPLEX64
<a name="line-24541"></a>      type: DT_COMPLEX128
<a name="line-24542"></a>      type: DT_QINT8
<a name="line-24543"></a>      type: DT_QUINT8
<a name="line-24544"></a>      type: DT_QINT32
<a name="line-24545"></a>      type: DT_HALF
<a name="line-24546"></a>    }
<a name="line-24547"></a>  }
<a name="line-24548"></a>  name: "T"
<a name="line-24549"></a>  type: "type"
<a name="line-24550"></a>}
<a name="line-24551"></a>attr {
<a name="line-24552"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-24553"></a>  name: "Tindices"
<a name="line-24554"></a>  type: "type"
<a name="line-24555"></a>}
<a name="line-24556"></a>attr {
<a name="line-24557"></a>  default_value { b: false }
<a name="line-24558"></a>  description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention."
<a name="line-24559"></a>  name: "use_locking"
<a name="line-24560"></a>  type: "bool"
<a name="line-24561"></a>}
<a name="line-24562"></a>input_arg {
<a name="line-24563"></a>  description: "A mutable Tensor. Should be from a Variable node."
<a name="line-24564"></a>  is_ref: true
<a name="line-24565"></a>  name: "ref"
<a name="line-24566"></a>  type_attr: "T"
<a name="line-24567"></a>}
<a name="line-24568"></a>input_arg {
<a name="line-24569"></a>  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
<a name="line-24570"></a>  name: "indices"
<a name="line-24571"></a>  type_attr: "Tindices"
<a name="line-24572"></a>}
<a name="line-24573"></a>input_arg {
<a name="line-24574"></a>  description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to ref."
<a name="line-24575"></a>  name: "updates"
<a name="line-24576"></a>  type_attr: "T"
<a name="line-24577"></a>}
<a name="line-24578"></a>output_arg {
<a name="line-24579"></a>  description: "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done."
<a name="line-24580"></a>  is_ref: true
<a name="line-24581"></a>  name: "output_ref"
<a name="line-24582"></a>  type_attr: "T"
<a name="line-24583"></a>}
<a name="line-24584"></a>-}</span>
<a name="line-24585"></a>
<a name="line-24586"></a><span class='hs-comment'>-- | Returns the number of gradients aggregated in the given accumulators.</span>
<a name="line-24587"></a>
<a name="line-24588"></a><a name="accumulatorNumAccumulated"></a><span class='hs-definition'>accumulatorNumAccumulated</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __handle__: The handle to an accumulator.</span>
<a name="line-24589"></a>                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __num_accumulated__: The number of gradients aggregated in the given accumulator.</span>
<a name="line-24590"></a><span class='hs-definition'>accumulatorNumAccumulated</span> <span class='hs-varid'>handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24591"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AccumulatorNumAccumulated"</span><span class='hs-layout'>)</span>
<a name="line-24592"></a>        <span class='hs-varid'>handle</span>
<a name="line-24593"></a><span class='hs-comment'>{-
<a name="line-24594"></a>input_arg {
<a name="line-24595"></a>  description: "The handle to an accumulator."
<a name="line-24596"></a>  is_ref: true
<a name="line-24597"></a>  name: "handle"
<a name="line-24598"></a>  type: DT_STRING
<a name="line-24599"></a>}
<a name="line-24600"></a>output_arg {
<a name="line-24601"></a>  description: "The number of gradients aggregated in the given accumulator."
<a name="line-24602"></a>  name: "num_accumulated"
<a name="line-24603"></a>  type: DT_INT32
<a name="line-24604"></a>}
<a name="line-24605"></a>-}</span>
<a name="line-24606"></a>
<a name="line-24607"></a><a name="sparseSegmentSqrtN"></a><span class='hs-comment'>-- | Computes the sum along sparse segments of a tensor divided by the sqrt of N.</span>
<a name="line-24608"></a><span class='hs-comment'>--</span>
<a name="line-24609"></a><span class='hs-comment'>-- N is the size of the segment being reduced.</span>
<a name="line-24610"></a><span class='hs-comment'>-- </span>
<a name="line-24611"></a><span class='hs-comment'>-- Read [the section on</span>
<a name="line-24612"></a><span class='hs-comment'>-- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation</span>
<a name="line-24613"></a><span class='hs-comment'>-- of segments.</span>
<a name="line-24614"></a><span class='hs-definition'>sparseSegmentSqrtN</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-24615"></a>                                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-24616"></a>                                                <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-24617"></a>                                                <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-24618"></a>                                                        <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-24619"></a>                      <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-24620"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.</span>
<a name="line-24621"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.</span>
<a name="line-24622"></a>                      <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Has same shape as data, except for dimension 0 which</span>
<a name="line-24623"></a>                      <span class='hs-comment'>-- has size `k`, the number of segments.</span>
<a name="line-24624"></a><span class='hs-definition'>sparseSegmentSqrtN</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>segment_ids</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24625"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseSegmentSqrtN"</span>
<a name="line-24626"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-24627"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-24628"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>segment_ids</span>
<a name="line-24629"></a><span class='hs-comment'>{-
<a name="line-24630"></a>attr {
<a name="line-24631"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-24632"></a>  name: "T"
<a name="line-24633"></a>  type: "type"
<a name="line-24634"></a>}
<a name="line-24635"></a>attr {
<a name="line-24636"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-24637"></a>  default_value { type: DT_INT32 }
<a name="line-24638"></a>  name: "Tidx"
<a name="line-24639"></a>  type: "type"
<a name="line-24640"></a>}
<a name="line-24641"></a>input_arg { name: "data" type_attr: "T" }
<a name="line-24642"></a>input_arg {
<a name="line-24643"></a>  description: "A 1-D tensor. Has same rank as `segment_ids`."
<a name="line-24644"></a>  name: "indices"
<a name="line-24645"></a>  type_attr: "Tidx"
<a name="line-24646"></a>}
<a name="line-24647"></a>input_arg {
<a name="line-24648"></a>  description: "A 1-D tensor. Values should be sorted and can be repeated."
<a name="line-24649"></a>  name: "segment_ids"
<a name="line-24650"></a>  type: DT_INT32
<a name="line-24651"></a>}
<a name="line-24652"></a>output_arg {
<a name="line-24653"></a>  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
<a name="line-24654"></a>  name: "output"
<a name="line-24655"></a>  type_attr: "T"
<a name="line-24656"></a>}
<a name="line-24657"></a>-}</span>
<a name="line-24658"></a>
<a name="line-24659"></a><a name="depthToSpace"></a><span class='hs-comment'>-- | DepthToSpace for tensors of type T.</span>
<a name="line-24660"></a><span class='hs-comment'>--</span>
<a name="line-24661"></a><span class='hs-comment'>-- Rearranges data from depth into blocks of spatial data.</span>
<a name="line-24662"></a><span class='hs-comment'>-- This is the reverse transformation of SpaceToDepth. More specifically,</span>
<a name="line-24663"></a><span class='hs-comment'>-- this op outputs a copy of the input tensor where values from the `depth`</span>
<a name="line-24664"></a><span class='hs-comment'>-- dimension are moved in spatial blocks to the `height` and `width` dimensions.</span>
<a name="line-24665"></a><span class='hs-comment'>-- The attr `block_size` indicates the input block size and how the data is moved.</span>
<a name="line-24666"></a><span class='hs-comment'>-- </span>
<a name="line-24667"></a><span class='hs-comment'>--   * Chunks of data of size `block_size * block_size` from depth are rearranged</span>
<a name="line-24668"></a><span class='hs-comment'>--     into non-overlapping blocks of size `block_size x block_size`</span>
<a name="line-24669"></a><span class='hs-comment'>--   * The width the output tensor is `input_depth * block_size`, whereas the</span>
<a name="line-24670"></a><span class='hs-comment'>--     height is `input_height * block_size`.</span>
<a name="line-24671"></a><span class='hs-comment'>--   * The depth of the input tensor must be divisible by</span>
<a name="line-24672"></a><span class='hs-comment'>--     `block_size * block_size`.</span>
<a name="line-24673"></a><span class='hs-comment'>-- </span>
<a name="line-24674"></a><span class='hs-comment'>-- That is, assuming the input is in the shape:</span>
<a name="line-24675"></a><span class='hs-comment'>-- `[batch, height, width, depth]`,</span>
<a name="line-24676"></a><span class='hs-comment'>-- the shape of the output will be:</span>
<a name="line-24677"></a><span class='hs-comment'>-- `[batch, height*block_size, width*block_size, depth/(block_size*block_size)]`</span>
<a name="line-24678"></a><span class='hs-comment'>-- </span>
<a name="line-24679"></a><span class='hs-comment'>-- This operation requires that the input tensor be of rank 4, and that</span>
<a name="line-24680"></a><span class='hs-comment'>-- `block_size` be &gt;=1 and that `block_size * block_size` be a divisor of the</span>
<a name="line-24681"></a><span class='hs-comment'>-- input depth.</span>
<a name="line-24682"></a><span class='hs-comment'>-- </span>
<a name="line-24683"></a><span class='hs-comment'>-- This operation is useful for resizing the activations between convolutions</span>
<a name="line-24684"></a><span class='hs-comment'>-- (but keeping all data), e.g. instead of pooling. It is also useful for training</span>
<a name="line-24685"></a><span class='hs-comment'>-- purely convolutional models.</span>
<a name="line-24686"></a><span class='hs-comment'>-- </span>
<a name="line-24687"></a><span class='hs-comment'>-- For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:</span>
<a name="line-24688"></a><span class='hs-comment'>-- </span>
<a name="line-24689"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-24690"></a><span class='hs-comment'>-- x = [[[[1, 2, 3, 4]]]]</span>
<a name="line-24691"></a><span class='hs-comment'>-- </span>
<a name="line-24692"></a><span class='hs-comment'>-- ```</span>
<a name="line-24693"></a><span class='hs-comment'>-- </span>
<a name="line-24694"></a><span class='hs-comment'>-- This operation will output a tensor of shape `[1, 2, 2, 1]`:</span>
<a name="line-24695"></a><span class='hs-comment'>-- </span>
<a name="line-24696"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-24697"></a><span class='hs-comment'>--    [[[[1], [2]],</span>
<a name="line-24698"></a><span class='hs-comment'>--      [[3], [4]]]]</span>
<a name="line-24699"></a><span class='hs-comment'>-- ```</span>
<a name="line-24700"></a><span class='hs-comment'>-- </span>
<a name="line-24701"></a><span class='hs-comment'>-- Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,</span>
<a name="line-24702"></a><span class='hs-comment'>-- the corresponding output will have 2x2 elements and will have a depth of</span>
<a name="line-24703"></a><span class='hs-comment'>-- 1 channel (1 = `4 / (block_size * block_size)`).</span>
<a name="line-24704"></a><span class='hs-comment'>-- The output element shape is `[2, 2, 1]`.</span>
<a name="line-24705"></a><span class='hs-comment'>-- </span>
<a name="line-24706"></a><span class='hs-comment'>-- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.</span>
<a name="line-24707"></a><span class='hs-comment'>-- </span>
<a name="line-24708"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-24709"></a><span class='hs-comment'>-- x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]</span>
<a name="line-24710"></a><span class='hs-comment'>-- ```</span>
<a name="line-24711"></a><span class='hs-comment'>-- </span>
<a name="line-24712"></a><span class='hs-comment'>-- This operation, for block size of 2, will return the following tensor of shape</span>
<a name="line-24713"></a><span class='hs-comment'>-- `[1, 2, 2, 3]`</span>
<a name="line-24714"></a><span class='hs-comment'>-- </span>
<a name="line-24715"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-24716"></a><span class='hs-comment'>--    [[[[1, 2, 3], [4, 5, 6]],</span>
<a name="line-24717"></a><span class='hs-comment'>--      [[7, 8, 9], [10, 11, 12]]]]</span>
<a name="line-24718"></a><span class='hs-comment'>-- </span>
<a name="line-24719"></a><span class='hs-comment'>-- ```</span>
<a name="line-24720"></a><span class='hs-comment'>-- </span>
<a name="line-24721"></a><span class='hs-comment'>-- Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:</span>
<a name="line-24722"></a><span class='hs-comment'>-- </span>
<a name="line-24723"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-24724"></a><span class='hs-comment'>-- x =  [[[[1, 2, 3, 4],</span>
<a name="line-24725"></a><span class='hs-comment'>--        [5, 6, 7, 8]],</span>
<a name="line-24726"></a><span class='hs-comment'>--       [[9, 10, 11, 12],</span>
<a name="line-24727"></a><span class='hs-comment'>--        [13, 14, 15, 16]]]]</span>
<a name="line-24728"></a><span class='hs-comment'>-- ```</span>
<a name="line-24729"></a><span class='hs-comment'>-- </span>
<a name="line-24730"></a><span class='hs-comment'>-- the operator will return the following tensor of shape `[1 4 4 1]`:</span>
<a name="line-24731"></a><span class='hs-comment'>-- </span>
<a name="line-24732"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-24733"></a><span class='hs-comment'>-- x = [[ [1],   [2],  [5],  [6]],</span>
<a name="line-24734"></a><span class='hs-comment'>--      [ [3],   [4],  [7],  [8]],</span>
<a name="line-24735"></a><span class='hs-comment'>--      [ [9],  [10], [13],  [14]],</span>
<a name="line-24736"></a><span class='hs-comment'>--      [ [11], [12], [15],  [16]]]</span>
<a name="line-24737"></a><span class='hs-comment'>-- </span>
<a name="line-24738"></a><span class='hs-comment'>-- ```</span>
<a name="line-24739"></a><span class='hs-definition'>depthToSpace</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-24740"></a>                <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __block_size__: The size of the spatial block, same as in Space2Depth.</span>
<a name="line-24741"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-24742"></a>                <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-24743"></a><span class='hs-definition'>depthToSpace</span> <span class='hs-varid'>block_size</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24744"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DepthToSpace"</span>
<a name="line-24745"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-24746"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"block_size"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>block_size</span><span class='hs-layout'>)</span>
<a name="line-24747"></a>        <span class='hs-varid'>input</span>
<a name="line-24748"></a><span class='hs-comment'>{-
<a name="line-24749"></a>attr { name: "T" type: "type" }
<a name="line-24750"></a>attr {
<a name="line-24751"></a>  description: "The size of the spatial block, same as in Space2Depth."
<a name="line-24752"></a>  has_minimum: true
<a name="line-24753"></a>  minimum: 2
<a name="line-24754"></a>  name: "block_size"
<a name="line-24755"></a>  type: "int"
<a name="line-24756"></a>}
<a name="line-24757"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-24758"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-24759"></a>-}</span>
<a name="line-24760"></a>
<a name="line-24761"></a><a name="allCandidateSampler"></a><span class='hs-comment'>-- | Generates labels for candidate sampling with a learned unigram distribution.</span>
<a name="line-24762"></a><span class='hs-comment'>--</span>
<a name="line-24763"></a><span class='hs-comment'>-- See explanations of candidate sampling and the data formats at</span>
<a name="line-24764"></a><span class='hs-comment'>-- go/candidate-sampling.</span>
<a name="line-24765"></a><span class='hs-comment'>-- </span>
<a name="line-24766"></a><span class='hs-comment'>-- For each batch, this op picks a single set of sampled candidate labels.</span>
<a name="line-24767"></a><span class='hs-comment'>-- </span>
<a name="line-24768"></a><span class='hs-comment'>-- The advantages of sampling candidates per-batch are simplicity and the</span>
<a name="line-24769"></a><span class='hs-comment'>-- possibility of efficient dense matrix multiplication. The disadvantage is that</span>
<a name="line-24770"></a><span class='hs-comment'>-- the sampled candidates must be chosen independently of the context and of the</span>
<a name="line-24771"></a><span class='hs-comment'>-- true labels.</span>
<a name="line-24772"></a><span class='hs-definition'>allCandidateSampler</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_sampled__: Number of candidates to produce per batch.</span>
<a name="line-24773"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_true__: Number of true labels per context.</span>
<a name="line-24774"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __unique__: If unique is true, we sample with rejection, so that all sampled</span>
<a name="line-24775"></a>                               <span class='hs-comment'>-- candidates in a batch are unique. This requires some approximation to</span>
<a name="line-24776"></a>                               <span class='hs-comment'>-- estimate the post-rejection sampling probabilities.</span>
<a name="line-24777"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the</span>
<a name="line-24778"></a>                                                   <span class='hs-comment'>-- IDs of the num_true target_classes in the corresponding original label.</span>
<a name="line-24779"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span>
<a name="line-24780"></a>                           <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-24781"></a>                       <span class='hs-comment'>-- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)</span>
<a name="line-24782"></a>                       <span class='hs-comment'>--</span>
<a name="line-24783"></a>                       <span class='hs-comment'>-- * __sampled_candidates__: A vector of length num_sampled, in which each element is</span>
<a name="line-24784"></a>                       <span class='hs-comment'>-- the ID of a sampled candidate.</span>
<a name="line-24785"></a>                       <span class='hs-comment'>--</span>
<a name="line-24786"></a>                       <span class='hs-comment'>-- * __true_expected_count__: A batch_size * num_true matrix, representing</span>
<a name="line-24787"></a>                       <span class='hs-comment'>-- the number of times each candidate is expected to occur in a batch</span>
<a name="line-24788"></a>                       <span class='hs-comment'>-- of sampled candidates. If unique=true, then this is a probability.</span>
<a name="line-24789"></a>                       <span class='hs-comment'>--</span>
<a name="line-24790"></a>                       <span class='hs-comment'>-- * __sampled_expected_count__: A vector of length num_sampled, for each sampled</span>
<a name="line-24791"></a>                       <span class='hs-comment'>-- candidate representing the number of times the candidate is expected</span>
<a name="line-24792"></a>                       <span class='hs-comment'>-- to occur in a batch of sampled candidates.  If unique=true, then this is a</span>
<a name="line-24793"></a>                       <span class='hs-comment'>-- probability.</span>
<a name="line-24794"></a><span class='hs-definition'>allCandidateSampler</span> <span class='hs-varid'>num_sampled</span> <span class='hs-varid'>num_true</span> <span class='hs-varid'>unique</span>
<a name="line-24795"></a>                    <span class='hs-varid'>true_classes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24796"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"AllCandidateSampler"</span>
<a name="line-24797"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_sampled"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_sampled</span>
<a name="line-24798"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_true"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_true</span>
<a name="line-24799"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"unique"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>unique</span><span class='hs-layout'>)</span>
<a name="line-24800"></a>        <span class='hs-varid'>true_classes</span>
<a name="line-24801"></a><span class='hs-comment'>{-
<a name="line-24802"></a>attr {
<a name="line-24803"></a>  description: "Number of true labels per context."
<a name="line-24804"></a>  has_minimum: true
<a name="line-24805"></a>  minimum: 1
<a name="line-24806"></a>  name: "num_true"
<a name="line-24807"></a>  type: "int"
<a name="line-24808"></a>}
<a name="line-24809"></a>attr {
<a name="line-24810"></a>  description: "Number of candidates to produce per batch."
<a name="line-24811"></a>  has_minimum: true
<a name="line-24812"></a>  minimum: 1
<a name="line-24813"></a>  name: "num_sampled"
<a name="line-24814"></a>  type: "int"
<a name="line-24815"></a>}
<a name="line-24816"></a>attr {
<a name="line-24817"></a>  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
<a name="line-24818"></a>  name: "unique"
<a name="line-24819"></a>  type: "bool"
<a name="line-24820"></a>}
<a name="line-24821"></a>attr {
<a name="line-24822"></a>  default_value { i: 0 }
<a name="line-24823"></a>  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-24824"></a>  name: "seed"
<a name="line-24825"></a>  type: "int"
<a name="line-24826"></a>}
<a name="line-24827"></a>attr {
<a name="line-24828"></a>  default_value { i: 0 }
<a name="line-24829"></a>  description: "An second seed to avoid seed collision."
<a name="line-24830"></a>  name: "seed2"
<a name="line-24831"></a>  type: "int"
<a name="line-24832"></a>}
<a name="line-24833"></a>input_arg {
<a name="line-24834"></a>  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
<a name="line-24835"></a>  name: "true_classes"
<a name="line-24836"></a>  type: DT_INT64
<a name="line-24837"></a>}
<a name="line-24838"></a>output_arg {
<a name="line-24839"></a>  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
<a name="line-24840"></a>  name: "sampled_candidates"
<a name="line-24841"></a>  type: DT_INT64
<a name="line-24842"></a>}
<a name="line-24843"></a>output_arg {
<a name="line-24844"></a>  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
<a name="line-24845"></a>  name: "true_expected_count"
<a name="line-24846"></a>  type: DT_FLOAT
<a name="line-24847"></a>}
<a name="line-24848"></a>output_arg {
<a name="line-24849"></a>  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
<a name="line-24850"></a>  name: "sampled_expected_count"
<a name="line-24851"></a>  type: DT_FLOAT
<a name="line-24852"></a>}
<a name="line-24853"></a>-}</span>
<a name="line-24854"></a>
<a name="line-24855"></a><span class='hs-comment'>-- | Computes the gradient of nearest neighbor interpolation.</span>
<a name="line-24856"></a>
<a name="line-24857"></a><a name="resizeNearestNeighborGrad"></a><span class='hs-definition'>resizeNearestNeighborGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-24858"></a>                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-24859"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-24860"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-24861"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-24862"></a>                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-24863"></a>                             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grads__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-24864"></a>                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __size__: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The</span>
<a name="line-24865"></a>                                                         <span class='hs-comment'>-- original input size.</span>
<a name="line-24866"></a>                             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients</span>
<a name="line-24867"></a>                             <span class='hs-comment'>-- with respect to the input image.</span>
<a name="line-24868"></a><span class='hs-definition'>resizeNearestNeighborGrad</span> <span class='hs-varid'>grads</span> <span class='hs-varid'>size</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24869"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ResizeNearestNeighborGrad"</span>
<a name="line-24870"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-24871"></a>        <span class='hs-varid'>grads</span> <span class='hs-varid'>size</span>
<a name="line-24872"></a><span class='hs-comment'>{-
<a name="line-24873"></a>attr {
<a name="line-24874"></a>  allowed_values {
<a name="line-24875"></a>    list {
<a name="line-24876"></a>      type: DT_UINT8
<a name="line-24877"></a>      type: DT_INT8
<a name="line-24878"></a>      type: DT_INT32
<a name="line-24879"></a>      type: DT_HALF
<a name="line-24880"></a>      type: DT_FLOAT
<a name="line-24881"></a>      type: DT_DOUBLE
<a name="line-24882"></a>    }
<a name="line-24883"></a>  }
<a name="line-24884"></a>  name: "T"
<a name="line-24885"></a>  type: "type"
<a name="line-24886"></a>}
<a name="line-24887"></a>attr {
<a name="line-24888"></a>  default_value { b: false }
<a name="line-24889"></a>  description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension."
<a name="line-24890"></a>  name: "align_corners"
<a name="line-24891"></a>  type: "bool"
<a name="line-24892"></a>}
<a name="line-24893"></a>input_arg {
<a name="line-24894"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-24895"></a>  name: "grads"
<a name="line-24896"></a>  type_attr: "T"
<a name="line-24897"></a>}
<a name="line-24898"></a>input_arg {
<a name="line-24899"></a>  description: "= A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The\noriginal input size."
<a name="line-24900"></a>  name: "size"
<a name="line-24901"></a>  type: DT_INT32
<a name="line-24902"></a>}
<a name="line-24903"></a>output_arg {
<a name="line-24904"></a>  description: "4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients\nwith respect to the input image."
<a name="line-24905"></a>  name: "output"
<a name="line-24906"></a>  type_attr: "T"
<a name="line-24907"></a>}
<a name="line-24908"></a>-}</span>
<a name="line-24909"></a>
<a name="line-24910"></a><a name="cTCGreedyDecoder"></a><span class='hs-comment'>-- | Performs greedy decoding on the logits given in inputs.</span>
<a name="line-24911"></a><span class='hs-comment'>--</span>
<a name="line-24912"></a><span class='hs-comment'>-- A note about the attribute merge_repeated: if enabled, when</span>
<a name="line-24913"></a><span class='hs-comment'>-- consecutive logits' maximum indices are the same, only the first of</span>
<a name="line-24914"></a><span class='hs-comment'>-- these is emitted.  Labeling the blank '*', the sequence "A B B * B B"</span>
<a name="line-24915"></a><span class='hs-comment'>-- becomes "A B" if merge_repeated = True and "A B B B B" if</span>
<a name="line-24916"></a><span class='hs-comment'>-- merge_repeated = False.</span>
<a name="line-24917"></a><span class='hs-comment'>-- </span>
<a name="line-24918"></a><span class='hs-comment'>-- Regardless of the value of merge_repeated, if the maximum index of a given</span>
<a name="line-24919"></a><span class='hs-comment'>-- time and batch corresponds to the blank, index `(num_classes - 1)`, no new</span>
<a name="line-24920"></a><span class='hs-comment'>-- element is emitted.</span>
<a name="line-24921"></a><span class='hs-definition'>cTCGreedyDecoder</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.</span>
<a name="line-24922"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __sequence_length__: A vector containing sequence lengths, size `(batch_size)`.</span>
<a name="line-24923"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-24924"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-24925"></a>                        <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-24926"></a>                    <span class='hs-comment'>-- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__)</span>
<a name="line-24927"></a>                    <span class='hs-comment'>--</span>
<a name="line-24928"></a>                    <span class='hs-comment'>-- * __decoded_indices__: Indices matrix, size `(total_decoded_outputs x 2)`,</span>
<a name="line-24929"></a>                    <span class='hs-comment'>-- of a `SparseTensor&lt;int64, 2&gt;`.  The rows store: [batch, time].</span>
<a name="line-24930"></a>                    <span class='hs-comment'>--</span>
<a name="line-24931"></a>                    <span class='hs-comment'>-- * __decoded_values__: Values vector, size: `(total_decoded_outputs)`,</span>
<a name="line-24932"></a>                    <span class='hs-comment'>-- of a `SparseTensor&lt;int64, 2&gt;`.  The vector stores the decoded classes.</span>
<a name="line-24933"></a>                    <span class='hs-comment'>--</span>
<a name="line-24934"></a>                    <span class='hs-comment'>-- * __decoded_shape__: Shape vector, size `(2)`, of the decoded SparseTensor.</span>
<a name="line-24935"></a>                    <span class='hs-comment'>-- Values are: `[batch_size, max_decoded_length]`.</span>
<a name="line-24936"></a>                    <span class='hs-comment'>--</span>
<a name="line-24937"></a>                    <span class='hs-comment'>-- * __log_probability__: Matrix, size `(batch_size x 1)`, containing sequence</span>
<a name="line-24938"></a>                    <span class='hs-comment'>-- log-probabilities.</span>
<a name="line-24939"></a><span class='hs-definition'>cTCGreedyDecoder</span> <span class='hs-varid'>inputs</span> <span class='hs-varid'>sequence_length</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24940"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CTCGreedyDecoder"</span><span class='hs-layout'>)</span>
<a name="line-24941"></a>        <span class='hs-varid'>inputs</span> <span class='hs-varid'>sequence_length</span>
<a name="line-24942"></a><span class='hs-comment'>{-
<a name="line-24943"></a>attr {
<a name="line-24944"></a>  default_value { b: false }
<a name="line-24945"></a>  description: "If True, merge repeated classes in output."
<a name="line-24946"></a>  name: "merge_repeated"
<a name="line-24947"></a>  type: "bool"
<a name="line-24948"></a>}
<a name="line-24949"></a>input_arg {
<a name="line-24950"></a>  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
<a name="line-24951"></a>  name: "inputs"
<a name="line-24952"></a>  type: DT_FLOAT
<a name="line-24953"></a>}
<a name="line-24954"></a>input_arg {
<a name="line-24955"></a>  description: "A vector containing sequence lengths, size `(batch_size)`."
<a name="line-24956"></a>  name: "sequence_length"
<a name="line-24957"></a>  type: DT_INT32
<a name="line-24958"></a>}
<a name="line-24959"></a>output_arg {
<a name="line-24960"></a>  description: "Indices matrix, size `(total_decoded_outputs x 2)`,\nof a `SparseTensor&lt;int64, 2&gt;`.  The rows store: [batch, time]."
<a name="line-24961"></a>  name: "decoded_indices"
<a name="line-24962"></a>  type: DT_INT64
<a name="line-24963"></a>}
<a name="line-24964"></a>output_arg {
<a name="line-24965"></a>  description: "Values vector, size: `(total_decoded_outputs)`,\nof a `SparseTensor&lt;int64, 2&gt;`.  The vector stores the decoded classes."
<a name="line-24966"></a>  name: "decoded_values"
<a name="line-24967"></a>  type: DT_INT64
<a name="line-24968"></a>}
<a name="line-24969"></a>output_arg {
<a name="line-24970"></a>  description: "Shape vector, size `(2)`, of the decoded SparseTensor.\nValues are: `[batch_size, max_decoded_length]`."
<a name="line-24971"></a>  name: "decoded_shape"
<a name="line-24972"></a>  type: DT_INT64
<a name="line-24973"></a>}
<a name="line-24974"></a>output_arg {
<a name="line-24975"></a>  description: "Matrix, size `(batch_size x 1)`, containing sequence\nlog-probabilities."
<a name="line-24976"></a>  name: "log_probability"
<a name="line-24977"></a>  type: DT_FLOAT
<a name="line-24978"></a>}
<a name="line-24979"></a>-}</span>
<a name="line-24980"></a>
<a name="line-24981"></a><a name="l2Loss"></a><span class='hs-comment'>-- | L2 Loss.</span>
<a name="line-24982"></a><span class='hs-comment'>--</span>
<a name="line-24983"></a><span class='hs-comment'>-- Computes half the L2 norm of a tensor without the `sqrt`:</span>
<a name="line-24984"></a><span class='hs-comment'>-- </span>
<a name="line-24985"></a><span class='hs-comment'>--     output = sum(t ** 2) / 2</span>
<a name="line-24986"></a><span class='hs-definition'>l2Loss</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-24987"></a>                                               <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-24988"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-24989"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-24990"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-24991"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-24992"></a>                                               <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-24993"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __t__: Typically 2-D, but may have any dimensions.</span>
<a name="line-24994"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 0-D.</span>
<a name="line-24995"></a><span class='hs-definition'>l2Loss</span> <span class='hs-varid'>t</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-24996"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"L2Loss"</span>
<a name="line-24997"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-24998"></a>        <span class='hs-varid'>t</span>
<a name="line-24999"></a><span class='hs-comment'>{-
<a name="line-25000"></a>attr {
<a name="line-25001"></a>  allowed_values {
<a name="line-25002"></a>    list {
<a name="line-25003"></a>      type: DT_FLOAT
<a name="line-25004"></a>      type: DT_DOUBLE
<a name="line-25005"></a>      type: DT_INT64
<a name="line-25006"></a>      type: DT_INT32
<a name="line-25007"></a>      type: DT_UINT8
<a name="line-25008"></a>      type: DT_UINT16
<a name="line-25009"></a>      type: DT_INT16
<a name="line-25010"></a>      type: DT_INT8
<a name="line-25011"></a>      type: DT_COMPLEX64
<a name="line-25012"></a>      type: DT_COMPLEX128
<a name="line-25013"></a>      type: DT_QINT8
<a name="line-25014"></a>      type: DT_QUINT8
<a name="line-25015"></a>      type: DT_QINT32
<a name="line-25016"></a>      type: DT_HALF
<a name="line-25017"></a>    }
<a name="line-25018"></a>  }
<a name="line-25019"></a>  name: "T"
<a name="line-25020"></a>  type: "type"
<a name="line-25021"></a>}
<a name="line-25022"></a>input_arg {
<a name="line-25023"></a>  description: "Typically 2-D, but may have any dimensions."
<a name="line-25024"></a>  name: "t"
<a name="line-25025"></a>  type_attr: "T"
<a name="line-25026"></a>}
<a name="line-25027"></a>output_arg { description: "0-D." name: "output" type_attr: "T" }
<a name="line-25028"></a>-}</span>
<a name="line-25029"></a>
<a name="line-25030"></a><a name="segmentMax"></a><span class='hs-comment'>-- | Computes the maximum along segments of a tensor.</span>
<a name="line-25031"></a><span class='hs-comment'>--</span>
<a name="line-25032"></a><span class='hs-comment'>-- Read [the section on Segmentation](../../api_docs/python/math_ops.md#segmentation)</span>
<a name="line-25033"></a><span class='hs-comment'>-- for an explanation of segments.</span>
<a name="line-25034"></a><span class='hs-comment'>-- </span>
<a name="line-25035"></a><span class='hs-comment'>-- Computes a tensor such that</span>
<a name="line-25036"></a><span class='hs-comment'>-- \\(output_i = \max_j(data_j)\\) where `max` is over `j` such</span>
<a name="line-25037"></a><span class='hs-comment'>-- that `segment_ids[j] == i`.</span>
<a name="line-25038"></a><span class='hs-comment'>-- </span>
<a name="line-25039"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-25040"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/SegmentMax.png" alt&gt;</span>
<a name="line-25041"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-25042"></a><span class='hs-definition'>segmentMax</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-25043"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-25044"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-25045"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-25046"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-25047"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-25048"></a>                                                               <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-25049"></a>                                         <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-25050"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-25051"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25052"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-25053"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s</span>
<a name="line-25054"></a>                                    <span class='hs-comment'>-- first dimension.  Values should be sorted and can be repeated.</span>
<a name="line-25055"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Has same shape as data, except for dimension 0 which</span>
<a name="line-25056"></a>              <span class='hs-comment'>-- has size `k`, the number of segments.</span>
<a name="line-25057"></a><span class='hs-definition'>segmentMax</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25058"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SegmentMax"</span>
<a name="line-25059"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-25060"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25061"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>segment_ids</span>
<a name="line-25062"></a><span class='hs-comment'>{-
<a name="line-25063"></a>attr {
<a name="line-25064"></a>  allowed_values {
<a name="line-25065"></a>    list {
<a name="line-25066"></a>      type: DT_FLOAT
<a name="line-25067"></a>      type: DT_DOUBLE
<a name="line-25068"></a>      type: DT_INT32
<a name="line-25069"></a>      type: DT_INT64
<a name="line-25070"></a>      type: DT_UINT8
<a name="line-25071"></a>      type: DT_INT16
<a name="line-25072"></a>      type: DT_INT8
<a name="line-25073"></a>      type: DT_UINT16
<a name="line-25074"></a>      type: DT_HALF
<a name="line-25075"></a>    }
<a name="line-25076"></a>  }
<a name="line-25077"></a>  name: "T"
<a name="line-25078"></a>  type: "type"
<a name="line-25079"></a>}
<a name="line-25080"></a>attr {
<a name="line-25081"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-25082"></a>  name: "Tindices"
<a name="line-25083"></a>  type: "type"
<a name="line-25084"></a>}
<a name="line-25085"></a>input_arg { name: "data" type_attr: "T" }
<a name="line-25086"></a>input_arg {
<a name="line-25087"></a>  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
<a name="line-25088"></a>  name: "segment_ids"
<a name="line-25089"></a>  type_attr: "Tindices"
<a name="line-25090"></a>}
<a name="line-25091"></a>output_arg {
<a name="line-25092"></a>  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
<a name="line-25093"></a>  name: "output"
<a name="line-25094"></a>  type_attr: "T"
<a name="line-25095"></a>}
<a name="line-25096"></a>-}</span>
<a name="line-25097"></a>
<a name="line-25098"></a><span class='hs-comment'>-- | Increments 'ref' until it reaches 'limit'.</span>
<a name="line-25099"></a>
<a name="line-25100"></a><a name="countUpTo"></a><span class='hs-definition'>countUpTo</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-25101"></a>                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25102"></a>             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __limit__: If incrementing ref would bring it above limit, instead generates an</span>
<a name="line-25103"></a>                            <span class='hs-comment'>-- 'OutOfRange' error.</span>
<a name="line-25104"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: Should be from a scalar `Variable` node.</span>
<a name="line-25105"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A copy of the input before increment. If nothing else modifies the</span>
<a name="line-25106"></a>             <span class='hs-comment'>-- input, the values produced will all be distinct.</span>
<a name="line-25107"></a><span class='hs-definition'>countUpTo</span> <span class='hs-varid'>limit</span> <span class='hs-varid'>ref</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25108"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CountUpTo"</span>
<a name="line-25109"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-25110"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"limit"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>limit</span><span class='hs-layout'>)</span>
<a name="line-25111"></a>        <span class='hs-varid'>ref</span>
<a name="line-25112"></a><span class='hs-comment'>{-
<a name="line-25113"></a>attr {
<a name="line-25114"></a>  description: "If incrementing ref would bring it above limit, instead generates an\n\'OutOfRange\' error."
<a name="line-25115"></a>  name: "limit"
<a name="line-25116"></a>  type: "int"
<a name="line-25117"></a>}
<a name="line-25118"></a>attr {
<a name="line-25119"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-25120"></a>  name: "T"
<a name="line-25121"></a>  type: "type"
<a name="line-25122"></a>}
<a name="line-25123"></a>input_arg {
<a name="line-25124"></a>  description: "Should be from a scalar `Variable` node."
<a name="line-25125"></a>  is_ref: true
<a name="line-25126"></a>  name: "ref"
<a name="line-25127"></a>  type_attr: "T"
<a name="line-25128"></a>}
<a name="line-25129"></a>output_arg {
<a name="line-25130"></a>  description: "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct."
<a name="line-25131"></a>  name: "output"
<a name="line-25132"></a>  type_attr: "T"
<a name="line-25133"></a>}
<a name="line-25134"></a>-}</span>
<a name="line-25135"></a>
<a name="line-25136"></a><span class='hs-comment'>-- | A Reader that outputs the records from a TensorFlow Records file.</span>
<a name="line-25137"></a>
<a name="line-25138"></a><a name="tFRecordReader"></a><span class='hs-definition'>tFRecordReader</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __reader_handle__: The handle to reference the Reader.</span>
<a name="line-25139"></a><span class='hs-definition'>tFRecordReader</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25140"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TFRecordReader"</span><span class='hs-layout'>)</span>
<a name="line-25141"></a>        
<a name="line-25142"></a><span class='hs-comment'>{-
<a name="line-25143"></a>attr {
<a name="line-25144"></a>  default_value { s: "" }
<a name="line-25145"></a>  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
<a name="line-25146"></a>  name: "container"
<a name="line-25147"></a>  type: "string"
<a name="line-25148"></a>}
<a name="line-25149"></a>attr {
<a name="line-25150"></a>  default_value { s: "" }
<a name="line-25151"></a>  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
<a name="line-25152"></a>  name: "shared_name"
<a name="line-25153"></a>  type: "string"
<a name="line-25154"></a>}
<a name="line-25155"></a>attr {
<a name="line-25156"></a>  default_value { s: "" } name: "compression_type" type: "string"
<a name="line-25157"></a>}
<a name="line-25158"></a>output_arg {
<a name="line-25159"></a>  description: "The handle to reference the Reader."
<a name="line-25160"></a>  is_ref: true
<a name="line-25161"></a>  name: "reader_handle"
<a name="line-25162"></a>  type: DT_STRING
<a name="line-25163"></a>}
<a name="line-25164"></a>-}</span>
<a name="line-25165"></a>
<a name="line-25166"></a><a name="switch"></a><span class='hs-comment'>-- | Forwards `data` to the output port determined by `pred`.</span>
<a name="line-25167"></a><span class='hs-comment'>--</span>
<a name="line-25168"></a><span class='hs-comment'>-- If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,</span>
<a name="line-25169"></a><span class='hs-comment'>-- the data goes to `output_false`.</span>
<a name="line-25170"></a><span class='hs-comment'>-- </span>
<a name="line-25171"></a><span class='hs-comment'>-- See also `RefSwitch` and `Merge`.</span>
<a name="line-25172"></a><span class='hs-definition'>switch</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25173"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__: The tensor to be forwarded to the appropriate output.</span>
<a name="line-25174"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __pred__: A scalar that specifies which output port will receive data.</span>
<a name="line-25175"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-25176"></a>          <span class='hs-comment'>-- ^ (__output_false__, __output_true__)</span>
<a name="line-25177"></a>          <span class='hs-comment'>--</span>
<a name="line-25178"></a>          <span class='hs-comment'>-- * __output_false__: If `pred` is false, data will be forwarded to this output.</span>
<a name="line-25179"></a>          <span class='hs-comment'>--</span>
<a name="line-25180"></a>          <span class='hs-comment'>-- * __output_true__: If `pred` is true, data will be forwarded to this output.</span>
<a name="line-25181"></a><span class='hs-definition'>switch</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>pred</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25182"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Switch"</span>
<a name="line-25183"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25184"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>pred</span>
<a name="line-25185"></a><span class='hs-comment'>{-
<a name="line-25186"></a>attr { name: "T" type: "type" }
<a name="line-25187"></a>input_arg {
<a name="line-25188"></a>  description: "The tensor to be forwarded to the appropriate output."
<a name="line-25189"></a>  name: "data"
<a name="line-25190"></a>  type_attr: "T"
<a name="line-25191"></a>}
<a name="line-25192"></a>input_arg {
<a name="line-25193"></a>  description: "A scalar that specifies which output port will receive data."
<a name="line-25194"></a>  name: "pred"
<a name="line-25195"></a>  type: DT_BOOL
<a name="line-25196"></a>}
<a name="line-25197"></a>output_arg {
<a name="line-25198"></a>  description: "If `pred` is false, data will be forwarded to this output."
<a name="line-25199"></a>  name: "output_false"
<a name="line-25200"></a>  type_attr: "T"
<a name="line-25201"></a>}
<a name="line-25202"></a>output_arg {
<a name="line-25203"></a>  description: "If `pred` is true, data will be forwarded to this output."
<a name="line-25204"></a>  name: "output_true"
<a name="line-25205"></a>  type_attr: "T"
<a name="line-25206"></a>}
<a name="line-25207"></a>-}</span>
<a name="line-25208"></a>
<a name="line-25209"></a><a name="sparseSegmentMeanGrad"></a><span class='hs-comment'>-- | Computes gradients for SparseSegmentMean.</span>
<a name="line-25210"></a><span class='hs-comment'>--</span>
<a name="line-25211"></a><span class='hs-comment'>-- Returns tensor "output" with same shape as grad, except for dimension 0 whose</span>
<a name="line-25212"></a><span class='hs-comment'>-- value is output_dim0.</span>
<a name="line-25213"></a><span class='hs-definition'>sparseSegmentMeanGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-25214"></a>                                                      <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-25215"></a>                                                      <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-25216"></a>                                                      <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-25217"></a>                                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25218"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __grad__: gradient propagated to the SparseSegmentMean op.</span>
<a name="line-25219"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __indices__: indices passed to the corresponding SparseSegmentMean op.</span>
<a name="line-25220"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentMean op.</span>
<a name="line-25221"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentMean op.</span>
<a name="line-25222"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-25223"></a><span class='hs-definition'>sparseSegmentMeanGrad</span> <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>segment_ids</span> <span class='hs-varid'>output_dim0</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25224"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseSegmentMeanGrad"</span>
<a name="line-25225"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-25226"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25227"></a>        <span class='hs-varid'>grad</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>segment_ids</span> <span class='hs-varid'>output_dim0</span>
<a name="line-25228"></a><span class='hs-comment'>{-
<a name="line-25229"></a>attr {
<a name="line-25230"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-25231"></a>  name: "T"
<a name="line-25232"></a>  type: "type"
<a name="line-25233"></a>}
<a name="line-25234"></a>attr {
<a name="line-25235"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-25236"></a>  default_value { type: DT_INT32 }
<a name="line-25237"></a>  name: "Tidx"
<a name="line-25238"></a>  type: "type"
<a name="line-25239"></a>}
<a name="line-25240"></a>input_arg {
<a name="line-25241"></a>  description: "gradient propagated to the SparseSegmentMean op."
<a name="line-25242"></a>  name: "grad"
<a name="line-25243"></a>  type_attr: "T"
<a name="line-25244"></a>}
<a name="line-25245"></a>input_arg {
<a name="line-25246"></a>  description: "indices passed to the corresponding SparseSegmentMean op."
<a name="line-25247"></a>  name: "indices"
<a name="line-25248"></a>  type_attr: "Tidx"
<a name="line-25249"></a>}
<a name="line-25250"></a>input_arg {
<a name="line-25251"></a>  description: "segment_ids passed to the corresponding SparseSegmentMean op."
<a name="line-25252"></a>  name: "segment_ids"
<a name="line-25253"></a>  type: DT_INT32
<a name="line-25254"></a>}
<a name="line-25255"></a>input_arg {
<a name="line-25256"></a>  description: "dimension 0 of \"data\" passed to SparseSegmentMean op."
<a name="line-25257"></a>  name: "output_dim0"
<a name="line-25258"></a>  type: DT_INT32
<a name="line-25259"></a>}
<a name="line-25260"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-25261"></a>-}</span>
<a name="line-25262"></a>
<a name="line-25263"></a><a name="gatherNd"></a><span class='hs-comment'>-- | Gather values or slices from `params` according to `indices`.</span>
<a name="line-25264"></a><span class='hs-comment'>--</span>
<a name="line-25265"></a><span class='hs-comment'>-- `params` is a Tensor of rank `P` and `indices` is a Tensor of rank `Q`.</span>
<a name="line-25266"></a><span class='hs-comment'>-- </span>
<a name="line-25267"></a><span class='hs-comment'>-- `indices` must be integer tensor, containing indices into `params`.</span>
<a name="line-25268"></a><span class='hs-comment'>-- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 &lt; K &lt;= P`.</span>
<a name="line-25269"></a><span class='hs-comment'>-- </span>
<a name="line-25270"></a><span class='hs-comment'>-- The innermost dimension of `indices` (with length `K`) corresponds to</span>
<a name="line-25271"></a><span class='hs-comment'>-- indices into elements (if `K = P`) or slices (if `K &lt; P`) along the `K`th</span>
<a name="line-25272"></a><span class='hs-comment'>-- dimension of `params`.</span>
<a name="line-25273"></a><span class='hs-comment'>-- </span>
<a name="line-25274"></a><span class='hs-comment'>-- Produces an output tensor with shape</span>
<a name="line-25275"></a><span class='hs-comment'>-- </span>
<a name="line-25276"></a><span class='hs-comment'>-- ```</span>
<a name="line-25277"></a><span class='hs-comment'>-- [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]].</span>
<a name="line-25278"></a><span class='hs-comment'>-- ```</span>
<a name="line-25279"></a><span class='hs-comment'>-- </span>
<a name="line-25280"></a><span class='hs-comment'>-- Some examples below.</span>
<a name="line-25281"></a><span class='hs-comment'>-- </span>
<a name="line-25282"></a><span class='hs-comment'>-- Simple indexing into a matrix:</span>
<a name="line-25283"></a><span class='hs-comment'>-- </span>
<a name="line-25284"></a><span class='hs-comment'>-- ```python</span>
<a name="line-25285"></a><span class='hs-comment'>--     indices = [[0, 0], [1, 1]]</span>
<a name="line-25286"></a><span class='hs-comment'>--     params = [['a', 'b'], ['c', 'd']]</span>
<a name="line-25287"></a><span class='hs-comment'>--     output = ['a', 'd']</span>
<a name="line-25288"></a><span class='hs-comment'>-- ```</span>
<a name="line-25289"></a><span class='hs-comment'>-- </span>
<a name="line-25290"></a><span class='hs-comment'>-- Slice indexing into a matrix:</span>
<a name="line-25291"></a><span class='hs-comment'>-- </span>
<a name="line-25292"></a><span class='hs-comment'>-- ```python</span>
<a name="line-25293"></a><span class='hs-comment'>--     indices = [[1], [0]]</span>
<a name="line-25294"></a><span class='hs-comment'>--     params = [['a', 'b'], ['c', 'd']]</span>
<a name="line-25295"></a><span class='hs-comment'>--     output = [['c', 'd'], ['a', 'b']]</span>
<a name="line-25296"></a><span class='hs-comment'>-- ```</span>
<a name="line-25297"></a><span class='hs-comment'>-- </span>
<a name="line-25298"></a><span class='hs-comment'>-- Indexing into a 3-tensor:</span>
<a name="line-25299"></a><span class='hs-comment'>-- </span>
<a name="line-25300"></a><span class='hs-comment'>-- ```python</span>
<a name="line-25301"></a><span class='hs-comment'>--     indices = [[1]]</span>
<a name="line-25302"></a><span class='hs-comment'>--     params = [[['a0', 'b0'], ['c0', 'd0']],</span>
<a name="line-25303"></a><span class='hs-comment'>--               [['a1', 'b1'], ['c1', 'd1']]]</span>
<a name="line-25304"></a><span class='hs-comment'>--     output = [[['a1', 'b1'], ['c1', 'd1']]]</span>
<a name="line-25305"></a><span class='hs-comment'>-- </span>
<a name="line-25306"></a><span class='hs-comment'>-- </span>
<a name="line-25307"></a><span class='hs-comment'>--     indices = [[0, 1], [1, 0]]</span>
<a name="line-25308"></a><span class='hs-comment'>--     params = [[['a0', 'b0'], ['c0', 'd0']],</span>
<a name="line-25309"></a><span class='hs-comment'>--               [['a1', 'b1'], ['c1', 'd1']]]</span>
<a name="line-25310"></a><span class='hs-comment'>--     output = [['c0', 'd0'], ['a1', 'b1']]</span>
<a name="line-25311"></a><span class='hs-comment'>-- </span>
<a name="line-25312"></a><span class='hs-comment'>-- </span>
<a name="line-25313"></a><span class='hs-comment'>--     indices = [[0, 0, 1], [1, 0, 1]]</span>
<a name="line-25314"></a><span class='hs-comment'>--     params = [[['a0', 'b0'], ['c0', 'd0']],</span>
<a name="line-25315"></a><span class='hs-comment'>--               [['a1', 'b1'], ['c1', 'd1']]]</span>
<a name="line-25316"></a><span class='hs-comment'>--     output = ['b0', 'b1']</span>
<a name="line-25317"></a><span class='hs-comment'>-- ```</span>
<a name="line-25318"></a><span class='hs-comment'>-- </span>
<a name="line-25319"></a><span class='hs-comment'>-- Batched indexing into a matrix:</span>
<a name="line-25320"></a><span class='hs-comment'>-- </span>
<a name="line-25321"></a><span class='hs-comment'>-- ```python</span>
<a name="line-25322"></a><span class='hs-comment'>--     indices = [[[0, 0]], [[0, 1]]]</span>
<a name="line-25323"></a><span class='hs-comment'>--     params = [['a', 'b'], ['c', 'd']]</span>
<a name="line-25324"></a><span class='hs-comment'>--     output = [['a'], ['b']]</span>
<a name="line-25325"></a><span class='hs-comment'>-- ```</span>
<a name="line-25326"></a><span class='hs-comment'>-- </span>
<a name="line-25327"></a><span class='hs-comment'>-- Batched slice indexing into a matrix:</span>
<a name="line-25328"></a><span class='hs-comment'>-- </span>
<a name="line-25329"></a><span class='hs-comment'>-- ```python</span>
<a name="line-25330"></a><span class='hs-comment'>--     indices = [[[1]], [[0]]]</span>
<a name="line-25331"></a><span class='hs-comment'>--     params = [['a', 'b'], ['c', 'd']]</span>
<a name="line-25332"></a><span class='hs-comment'>--     output = [[['c', 'd']], [['a', 'b']]]</span>
<a name="line-25333"></a><span class='hs-comment'>-- ```</span>
<a name="line-25334"></a><span class='hs-comment'>-- </span>
<a name="line-25335"></a><span class='hs-comment'>-- Batched indexing into a 3-tensor:</span>
<a name="line-25336"></a><span class='hs-comment'>-- </span>
<a name="line-25337"></a><span class='hs-comment'>-- ```python</span>
<a name="line-25338"></a><span class='hs-comment'>--     indices = [[[1]], [[0]]]</span>
<a name="line-25339"></a><span class='hs-comment'>--     params = [[['a0', 'b0'], ['c0', 'd0']],</span>
<a name="line-25340"></a><span class='hs-comment'>--               [['a1', 'b1'], ['c1', 'd1']]]</span>
<a name="line-25341"></a><span class='hs-comment'>--     output = [[[['a1', 'b1'], ['c1', 'd1']]],</span>
<a name="line-25342"></a><span class='hs-comment'>--               [[['a0', 'b0'], ['c0', 'd0']]]]</span>
<a name="line-25343"></a><span class='hs-comment'>-- </span>
<a name="line-25344"></a><span class='hs-comment'>--     indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]</span>
<a name="line-25345"></a><span class='hs-comment'>--     params = [[['a0', 'b0'], ['c0', 'd0']],</span>
<a name="line-25346"></a><span class='hs-comment'>--               [['a1', 'b1'], ['c1', 'd1']]]</span>
<a name="line-25347"></a><span class='hs-comment'>--     output = [[['c0', 'd0'], ['a1', 'b1']],</span>
<a name="line-25348"></a><span class='hs-comment'>--               [['a0', 'b0'], ['c1', 'd1']]]</span>
<a name="line-25349"></a><span class='hs-comment'>-- </span>
<a name="line-25350"></a><span class='hs-comment'>-- </span>
<a name="line-25351"></a><span class='hs-comment'>--     indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]</span>
<a name="line-25352"></a><span class='hs-comment'>--     params = [[['a0', 'b0'], ['c0', 'd0']],</span>
<a name="line-25353"></a><span class='hs-comment'>--               [['a1', 'b1'], ['c1', 'd1']]]</span>
<a name="line-25354"></a><span class='hs-comment'>--     output = [['b0', 'b1'], ['d0', 'c1']]</span>
<a name="line-25355"></a><span class='hs-comment'>-- ```</span>
<a name="line-25356"></a><span class='hs-definition'>gatherNd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tparams</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tparams</span><span class='hs-layout'>,</span>
<a name="line-25357"></a>                                             <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-25358"></a>                                             <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-25359"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25360"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tparams</span> <span class='hs-comment'>-- ^ __params__: `P-D`.  The tensor from which to gather values.</span>
<a name="line-25361"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: `Q-D`.  Index tensor having shape `[d_0, ..., d_{Q-2}, K]`.</span>
<a name="line-25362"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>tparams</span> <span class='hs-comment'>-- ^ __output__: `(P+Q-K-1)-D`.  Values from `params` gathered from indices given by</span>
<a name="line-25363"></a>            <span class='hs-comment'>-- `indices`.</span>
<a name="line-25364"></a><span class='hs-definition'>gatherNd</span> <span class='hs-varid'>params</span> <span class='hs-varid'>indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25365"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"GatherNd"</span>
<a name="line-25366"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tparams"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tparams</span><span class='hs-layout'>)</span>
<a name="line-25367"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25368"></a>        <span class='hs-varid'>params</span> <span class='hs-varid'>indices</span>
<a name="line-25369"></a><span class='hs-comment'>{-
<a name="line-25370"></a>attr { name: "Tparams" type: "type" }
<a name="line-25371"></a>attr {
<a name="line-25372"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-25373"></a>  name: "Tindices"
<a name="line-25374"></a>  type: "type"
<a name="line-25375"></a>}
<a name="line-25376"></a>input_arg {
<a name="line-25377"></a>  description: "`P-D`.  The tensor from which to gather values."
<a name="line-25378"></a>  name: "params"
<a name="line-25379"></a>  type_attr: "Tparams"
<a name="line-25380"></a>}
<a name="line-25381"></a>input_arg {
<a name="line-25382"></a>  description: "`Q-D`.  Index tensor having shape `[d_0, ..., d_{Q-2}, K]`."
<a name="line-25383"></a>  name: "indices"
<a name="line-25384"></a>  type_attr: "Tindices"
<a name="line-25385"></a>}
<a name="line-25386"></a>output_arg {
<a name="line-25387"></a>  description: "`(P+Q-K-1)-D`.  Values from `params` gathered from indices given by\n`indices`."
<a name="line-25388"></a>  name: "output"
<a name="line-25389"></a>  type_attr: "Tparams"
<a name="line-25390"></a>}
<a name="line-25391"></a>-}</span>
<a name="line-25392"></a>
<a name="line-25393"></a><a name="squeeze"></a><span class='hs-comment'>-- | Removes dimensions of size 1 from the shape of a tensor.</span>
<a name="line-25394"></a><span class='hs-comment'>--</span>
<a name="line-25395"></a><span class='hs-comment'>-- Given a tensor `input`, this operation returns a tensor of the same type with</span>
<a name="line-25396"></a><span class='hs-comment'>-- all dimensions of size 1 removed. If you don't want to remove all size 1</span>
<a name="line-25397"></a><span class='hs-comment'>-- dimensions, you can remove specific size 1 dimensions by specifying</span>
<a name="line-25398"></a><span class='hs-comment'>-- `squeeze_dims`.</span>
<a name="line-25399"></a><span class='hs-comment'>-- </span>
<a name="line-25400"></a><span class='hs-comment'>-- For example:</span>
<a name="line-25401"></a><span class='hs-comment'>-- </span>
<a name="line-25402"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-25403"></a><span class='hs-comment'>-- # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]</span>
<a name="line-25404"></a><span class='hs-comment'>-- shape(squeeze(t)) ==&gt; [2, 3]</span>
<a name="line-25405"></a><span class='hs-comment'>-- ```</span>
<a name="line-25406"></a><span class='hs-comment'>-- </span>
<a name="line-25407"></a><span class='hs-comment'>-- Or, to remove specific size 1 dimensions:</span>
<a name="line-25408"></a><span class='hs-comment'>-- </span>
<a name="line-25409"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-25410"></a><span class='hs-comment'>-- # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]</span>
<a name="line-25411"></a><span class='hs-comment'>-- shape(squeeze(t, [2, 4])) ==&gt; [1, 2, 3, 1]</span>
<a name="line-25412"></a><span class='hs-comment'>-- ```</span>
<a name="line-25413"></a><span class='hs-definition'>squeeze</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25414"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The `input` to squeeze.</span>
<a name="line-25415"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: Contains the same data as `input`, but has one or more dimensions of</span>
<a name="line-25416"></a>           <span class='hs-comment'>-- size 1 removed.</span>
<a name="line-25417"></a><span class='hs-definition'>squeeze</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25418"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Squeeze"</span>
<a name="line-25419"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25420"></a>        <span class='hs-varid'>input</span>
<a name="line-25421"></a><span class='hs-comment'>{-
<a name="line-25422"></a>attr { name: "T" type: "type" }
<a name="line-25423"></a>attr {
<a name="line-25424"></a>  default_value { list { } }
<a name="line-25425"></a>  description: "If specified, only squeezes the dimensions listed. The dimension\nindex starts at 0. It is an error to squeeze a dimension that is not 1."
<a name="line-25426"></a>  has_minimum: true
<a name="line-25427"></a>  name: "squeeze_dims"
<a name="line-25428"></a>  type: "list(int)"
<a name="line-25429"></a>}
<a name="line-25430"></a>input_arg {
<a name="line-25431"></a>  description: "The `input` to squeeze." name: "input" type_attr: "T"
<a name="line-25432"></a>}
<a name="line-25433"></a>output_arg {
<a name="line-25434"></a>  description: "Contains the same data as `input`, but has one or more dimensions of\nsize 1 removed."
<a name="line-25435"></a>  name: "output"
<a name="line-25436"></a>  type_attr: "T"
<a name="line-25437"></a>}
<a name="line-25438"></a>-}</span>
<a name="line-25439"></a>
<a name="line-25440"></a><a name="randomUniform"></a><span class='hs-comment'>-- | Outputs random values from a uniform distribution.</span>
<a name="line-25441"></a><span class='hs-comment'>--</span>
<a name="line-25442"></a><span class='hs-comment'>-- The generated values follow a uniform distribution in the range `[0, 1)`. The</span>
<a name="line-25443"></a><span class='hs-comment'>-- lower bound 0 is included in the range, while the upper bound 1 is excluded.</span>
<a name="line-25444"></a><span class='hs-definition'>randomUniform</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>dtype</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span>
<a name="line-25445"></a>                                      <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-25446"></a>                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-25447"></a>                                      <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-25448"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25449"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __shape__: The shape of the output tensor.</span>
<a name="line-25450"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: A tensor of the specified shape filled with uniform random values.</span>
<a name="line-25451"></a><span class='hs-definition'>randomUniform</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25452"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RandomUniform"</span>
<a name="line-25453"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"dtype"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>dtype</span><span class='hs-layout'>)</span>
<a name="line-25454"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25455"></a>        <span class='hs-varid'>shape</span>
<a name="line-25456"></a><span class='hs-comment'>{-
<a name="line-25457"></a>attr {
<a name="line-25458"></a>  default_value { i: 0 }
<a name="line-25459"></a>  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-25460"></a>  name: "seed"
<a name="line-25461"></a>  type: "int"
<a name="line-25462"></a>}
<a name="line-25463"></a>attr {
<a name="line-25464"></a>  default_value { i: 0 }
<a name="line-25465"></a>  description: "A second seed to avoid seed collision."
<a name="line-25466"></a>  name: "seed2"
<a name="line-25467"></a>  type: "int"
<a name="line-25468"></a>}
<a name="line-25469"></a>attr {
<a name="line-25470"></a>  allowed_values {
<a name="line-25471"></a>    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
<a name="line-25472"></a>  }
<a name="line-25473"></a>  description: "The type of the output."
<a name="line-25474"></a>  name: "dtype"
<a name="line-25475"></a>  type: "type"
<a name="line-25476"></a>}
<a name="line-25477"></a>attr {
<a name="line-25478"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-25479"></a>  name: "T"
<a name="line-25480"></a>  type: "type"
<a name="line-25481"></a>}
<a name="line-25482"></a>input_arg {
<a name="line-25483"></a>  description: "The shape of the output tensor."
<a name="line-25484"></a>  name: "shape"
<a name="line-25485"></a>  type_attr: "T"
<a name="line-25486"></a>}
<a name="line-25487"></a>output_arg {
<a name="line-25488"></a>  description: "A tensor of the specified shape filled with uniform random values."
<a name="line-25489"></a>  name: "output"
<a name="line-25490"></a>  type_attr: "dtype"
<a name="line-25491"></a>}
<a name="line-25492"></a>-}</span>
<a name="line-25493"></a>
<a name="line-25494"></a><a name="readerReadUpTo"></a><span class='hs-comment'>-- | Returns up to `num_records` (key, value) pairs produced by a Reader.</span>
<a name="line-25495"></a><span class='hs-comment'>--</span>
<a name="line-25496"></a><span class='hs-comment'>-- Will dequeue from the input queue if necessary (e.g. when the</span>
<a name="line-25497"></a><span class='hs-comment'>-- Reader needs to start reading from a new file since it has finished</span>
<a name="line-25498"></a><span class='hs-comment'>-- with the previous file).</span>
<a name="line-25499"></a><span class='hs-comment'>-- It may return less than `num_records` even before the last batch.</span>
<a name="line-25500"></a><span class='hs-definition'>readerReadUpTo</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __reader_handle__: Handle to a `Reader`.</span>
<a name="line-25501"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __queue_handle__: Handle to a `Queue`, with string work items.</span>
<a name="line-25502"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_records__: number of records to read from `Reader`.</span>
<a name="line-25503"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>,</span>
<a name="line-25504"></a>                             <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25505"></a>                  <span class='hs-comment'>-- ^ (__keys__, __values__)</span>
<a name="line-25506"></a>                  <span class='hs-comment'>--</span>
<a name="line-25507"></a>                  <span class='hs-comment'>-- * __keys__: A 1-D tensor.</span>
<a name="line-25508"></a>                  <span class='hs-comment'>--</span>
<a name="line-25509"></a>                  <span class='hs-comment'>-- * __values__: A 1-D tensor.</span>
<a name="line-25510"></a><span class='hs-definition'>readerReadUpTo</span> <span class='hs-varid'>reader_handle</span> <span class='hs-varid'>queue_handle</span> <span class='hs-varid'>num_records</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25511"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReaderReadUpTo"</span><span class='hs-layout'>)</span>
<a name="line-25512"></a>        <span class='hs-varid'>reader_handle</span> <span class='hs-varid'>queue_handle</span> <span class='hs-varid'>num_records</span>
<a name="line-25513"></a><span class='hs-comment'>{-
<a name="line-25514"></a>input_arg {
<a name="line-25515"></a>  description: "Handle to a `Reader`."
<a name="line-25516"></a>  is_ref: true
<a name="line-25517"></a>  name: "reader_handle"
<a name="line-25518"></a>  type: DT_STRING
<a name="line-25519"></a>}
<a name="line-25520"></a>input_arg {
<a name="line-25521"></a>  description: "Handle to a `Queue`, with string work items."
<a name="line-25522"></a>  is_ref: true
<a name="line-25523"></a>  name: "queue_handle"
<a name="line-25524"></a>  type: DT_STRING
<a name="line-25525"></a>}
<a name="line-25526"></a>input_arg {
<a name="line-25527"></a>  description: "number of records to read from `Reader`."
<a name="line-25528"></a>  name: "num_records"
<a name="line-25529"></a>  type: DT_INT64
<a name="line-25530"></a>}
<a name="line-25531"></a>output_arg {
<a name="line-25532"></a>  description: "A 1-D tensor." name: "keys" type: DT_STRING
<a name="line-25533"></a>}
<a name="line-25534"></a>output_arg {
<a name="line-25535"></a>  description: "A 1-D tensor." name: "values" type: DT_STRING
<a name="line-25536"></a>}
<a name="line-25537"></a>-}</span>
<a name="line-25538"></a>
<a name="line-25539"></a><span class='hs-comment'>-- | Computes the gradients of 3-D convolution with respect to the input.</span>
<a name="line-25540"></a>
<a name="line-25541"></a><a name="conv3DBackpropInput"></a><span class='hs-definition'>conv3DBackpropInput</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-25542"></a>                                            <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-25543"></a>                                                    <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-25544"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-25545"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-25546"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-25547"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-25548"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-25549"></a>                                                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-25550"></a>                                                    <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25551"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.</span>
<a name="line-25552"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.</span>
<a name="line-25553"></a>                                      <span class='hs-comment'>-- `in_channels` must match between `input` and `filter`.</span>
<a name="line-25554"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,</span>
<a name="line-25555"></a>                                      <span class='hs-comment'>-- out_channels]`.</span>
<a name="line-25556"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-25557"></a><span class='hs-definition'>conv3DBackpropInput</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25558"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Conv3DBackpropInput"</span>
<a name="line-25559"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25560"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-varid'>out_backprop</span>
<a name="line-25561"></a><span class='hs-comment'>{-
<a name="line-25562"></a>attr {
<a name="line-25563"></a>  allowed_values {
<a name="line-25564"></a>    list {
<a name="line-25565"></a>      type: DT_FLOAT
<a name="line-25566"></a>      type: DT_DOUBLE
<a name="line-25567"></a>      type: DT_INT64
<a name="line-25568"></a>      type: DT_INT32
<a name="line-25569"></a>      type: DT_UINT8
<a name="line-25570"></a>      type: DT_UINT16
<a name="line-25571"></a>      type: DT_INT16
<a name="line-25572"></a>      type: DT_INT8
<a name="line-25573"></a>      type: DT_COMPLEX64
<a name="line-25574"></a>      type: DT_COMPLEX128
<a name="line-25575"></a>      type: DT_QINT8
<a name="line-25576"></a>      type: DT_QUINT8
<a name="line-25577"></a>      type: DT_QINT32
<a name="line-25578"></a>      type: DT_HALF
<a name="line-25579"></a>    }
<a name="line-25580"></a>  }
<a name="line-25581"></a>  name: "T"
<a name="line-25582"></a>  type: "type"
<a name="line-25583"></a>}
<a name="line-25584"></a>attr {
<a name="line-25585"></a>  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
<a name="line-25586"></a>  has_minimum: true
<a name="line-25587"></a>  minimum: 5
<a name="line-25588"></a>  name: "strides"
<a name="line-25589"></a>  type: "list(int)"
<a name="line-25590"></a>}
<a name="line-25591"></a>attr {
<a name="line-25592"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-25593"></a>  description: "The type of padding algorithm to use."
<a name="line-25594"></a>  name: "padding"
<a name="line-25595"></a>  type: "string"
<a name="line-25596"></a>}
<a name="line-25597"></a>input_arg {
<a name="line-25598"></a>  description: "Shape `[batch, depth, rows, cols, in_channels]`."
<a name="line-25599"></a>  name: "input"
<a name="line-25600"></a>  type_attr: "T"
<a name="line-25601"></a>}
<a name="line-25602"></a>input_arg {
<a name="line-25603"></a>  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
<a name="line-25604"></a>  name: "filter"
<a name="line-25605"></a>  type_attr: "T"
<a name="line-25606"></a>}
<a name="line-25607"></a>input_arg {
<a name="line-25608"></a>  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
<a name="line-25609"></a>  name: "out_backprop"
<a name="line-25610"></a>  type_attr: "T"
<a name="line-25611"></a>}
<a name="line-25612"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-25613"></a>-}</span>
<a name="line-25614"></a>
<a name="line-25615"></a><a name="depthwiseConv2dNative"></a><span class='hs-comment'>-- | Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.</span>
<a name="line-25616"></a><span class='hs-comment'>--</span>
<a name="line-25617"></a><span class='hs-comment'>-- Given an input tensor of shape `[batch, in_height, in_width, in_channels]`</span>
<a name="line-25618"></a><span class='hs-comment'>-- and a filter / kernel tensor of shape</span>
<a name="line-25619"></a><span class='hs-comment'>-- `[filter_height, filter_width, in_channels, channel_multiplier]`, containing</span>
<a name="line-25620"></a><span class='hs-comment'>-- `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies</span>
<a name="line-25621"></a><span class='hs-comment'>-- a different filter to each input channel (expanding from 1 channel to</span>
<a name="line-25622"></a><span class='hs-comment'>-- `channel_multiplier` channels for each), then concatenates the results</span>
<a name="line-25623"></a><span class='hs-comment'>-- together. Thus, the output has `in_channels * channel_multiplier` channels.</span>
<a name="line-25624"></a><span class='hs-comment'>-- </span>
<a name="line-25625"></a><span class='hs-comment'>-- for k in 0..in_channels-1</span>
<a name="line-25626"></a><span class='hs-comment'>--   for q in 0..channel_multiplier-1</span>
<a name="line-25627"></a><span class='hs-comment'>--     output[b, i, j, k * channel_multiplier + q] =</span>
<a name="line-25628"></a><span class='hs-comment'>--       sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *</span>
<a name="line-25629"></a><span class='hs-comment'>--                         filter[di, dj, k, q]</span>
<a name="line-25630"></a><span class='hs-comment'>-- </span>
<a name="line-25631"></a><span class='hs-comment'>-- Must have `strides[0] = strides[3] = 1`.  For the most common case of the same</span>
<a name="line-25632"></a><span class='hs-comment'>-- horizontal and vertices strides, `strides = [1, stride, stride, 1]`.</span>
<a name="line-25633"></a><span class='hs-definition'>depthwiseConv2dNative</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-25634"></a>                                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25635"></a>                         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-25636"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __filter__</span>
<a name="line-25637"></a>                         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-25638"></a><span class='hs-definition'>depthwiseConv2dNative</span> <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25639"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DepthwiseConv2dNative"</span>
<a name="line-25640"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25641"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>filter</span>
<a name="line-25642"></a><span class='hs-comment'>{-
<a name="line-25643"></a>attr {
<a name="line-25644"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-25645"></a>  name: "T"
<a name="line-25646"></a>  type: "type"
<a name="line-25647"></a>}
<a name="line-25648"></a>attr {
<a name="line-25649"></a>  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`."
<a name="line-25650"></a>  name: "strides"
<a name="line-25651"></a>  type: "list(int)"
<a name="line-25652"></a>}
<a name="line-25653"></a>attr {
<a name="line-25654"></a>  allowed_values { list { s: "SAME" s: "VALID" } }
<a name="line-25655"></a>  description: "The type of padding algorithm to use."
<a name="line-25656"></a>  name: "padding"
<a name="line-25657"></a>  type: "string"
<a name="line-25658"></a>}
<a name="line-25659"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-25660"></a>input_arg { name: "filter" type_attr: "T" }
<a name="line-25661"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-25662"></a>-}</span>
<a name="line-25663"></a>
<a name="line-25664"></a><a name="learnedUnigramCandidateSampler"></a><span class='hs-comment'>-- | Generates labels for candidate sampling with a learned unigram distribution.</span>
<a name="line-25665"></a><span class='hs-comment'>--</span>
<a name="line-25666"></a><span class='hs-comment'>-- See explanations of candidate sampling and the data formats at</span>
<a name="line-25667"></a><span class='hs-comment'>-- go/candidate-sampling.</span>
<a name="line-25668"></a><span class='hs-comment'>-- </span>
<a name="line-25669"></a><span class='hs-comment'>-- For each batch, this op picks a single set of sampled candidate labels.</span>
<a name="line-25670"></a><span class='hs-comment'>-- </span>
<a name="line-25671"></a><span class='hs-comment'>-- The advantages of sampling candidates per-batch are simplicity and the</span>
<a name="line-25672"></a><span class='hs-comment'>-- possibility of efficient dense matrix multiplication. The disadvantage is that</span>
<a name="line-25673"></a><span class='hs-comment'>-- the sampled candidates must be chosen independently of the context and of the</span>
<a name="line-25674"></a><span class='hs-comment'>-- true labels.</span>
<a name="line-25675"></a><span class='hs-definition'>learnedUnigramCandidateSampler</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_sampled__: Number of candidates to randomly sample per batch.</span>
<a name="line-25676"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_true__: Number of true labels per context.</span>
<a name="line-25677"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).</span>
<a name="line-25678"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __unique__: If unique is true, we sample with rejection, so that all sampled</span>
<a name="line-25679"></a>                                          <span class='hs-comment'>-- candidates in a batch are unique. This requires some approximation to</span>
<a name="line-25680"></a>                                          <span class='hs-comment'>-- estimate the post-rejection sampling probabilities.</span>
<a name="line-25681"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the</span>
<a name="line-25682"></a>                                                              <span class='hs-comment'>-- IDs of the num_true target_classes in the corresponding original label.</span>
<a name="line-25683"></a>                                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-25684"></a>                                      <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-25685"></a>                                  <span class='hs-comment'>-- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)</span>
<a name="line-25686"></a>                                  <span class='hs-comment'>--</span>
<a name="line-25687"></a>                                  <span class='hs-comment'>-- * __sampled_candidates__: A vector of length num_sampled, in which each element is</span>
<a name="line-25688"></a>                                  <span class='hs-comment'>-- the ID of a sampled candidate.</span>
<a name="line-25689"></a>                                  <span class='hs-comment'>--</span>
<a name="line-25690"></a>                                  <span class='hs-comment'>-- * __true_expected_count__: A batch_size * num_true matrix, representing</span>
<a name="line-25691"></a>                                  <span class='hs-comment'>-- the number of times each candidate is expected to occur in a batch</span>
<a name="line-25692"></a>                                  <span class='hs-comment'>-- of sampled candidates. If unique=true, then this is a probability.</span>
<a name="line-25693"></a>                                  <span class='hs-comment'>--</span>
<a name="line-25694"></a>                                  <span class='hs-comment'>-- * __sampled_expected_count__: A vector of length num_sampled, for each sampled</span>
<a name="line-25695"></a>                                  <span class='hs-comment'>-- candidate representing the number of times the candidate is expected</span>
<a name="line-25696"></a>                                  <span class='hs-comment'>-- to occur in a batch of sampled candidates.  If unique=true, then this is a</span>
<a name="line-25697"></a>                                  <span class='hs-comment'>-- probability.</span>
<a name="line-25698"></a><span class='hs-definition'>learnedUnigramCandidateSampler</span> <span class='hs-varid'>num_sampled</span> <span class='hs-varid'>num_true</span> <span class='hs-varid'>range_max</span> <span class='hs-varid'>unique</span>
<a name="line-25699"></a>                               <span class='hs-varid'>true_classes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25700"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LearnedUnigramCandidateSampler"</span>
<a name="line-25701"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_sampled"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_sampled</span>
<a name="line-25702"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_true"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_true</span>
<a name="line-25703"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"range_max"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>range_max</span>
<a name="line-25704"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"unique"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>unique</span><span class='hs-layout'>)</span>
<a name="line-25705"></a>        <span class='hs-varid'>true_classes</span>
<a name="line-25706"></a><span class='hs-comment'>{-
<a name="line-25707"></a>attr {
<a name="line-25708"></a>  description: "Number of true labels per context."
<a name="line-25709"></a>  has_minimum: true
<a name="line-25710"></a>  minimum: 1
<a name="line-25711"></a>  name: "num_true"
<a name="line-25712"></a>  type: "int"
<a name="line-25713"></a>}
<a name="line-25714"></a>attr {
<a name="line-25715"></a>  description: "Number of candidates to randomly sample per batch."
<a name="line-25716"></a>  has_minimum: true
<a name="line-25717"></a>  minimum: 1
<a name="line-25718"></a>  name: "num_sampled"
<a name="line-25719"></a>  type: "int"
<a name="line-25720"></a>}
<a name="line-25721"></a>attr {
<a name="line-25722"></a>  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
<a name="line-25723"></a>  name: "unique"
<a name="line-25724"></a>  type: "bool"
<a name="line-25725"></a>}
<a name="line-25726"></a>attr {
<a name="line-25727"></a>  description: "The sampler will sample integers from the interval [0, range_max)."
<a name="line-25728"></a>  has_minimum: true
<a name="line-25729"></a>  minimum: 1
<a name="line-25730"></a>  name: "range_max"
<a name="line-25731"></a>  type: "int"
<a name="line-25732"></a>}
<a name="line-25733"></a>attr {
<a name="line-25734"></a>  default_value { i: 0 }
<a name="line-25735"></a>  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-25736"></a>  name: "seed"
<a name="line-25737"></a>  type: "int"
<a name="line-25738"></a>}
<a name="line-25739"></a>attr {
<a name="line-25740"></a>  default_value { i: 0 }
<a name="line-25741"></a>  description: "An second seed to avoid seed collision."
<a name="line-25742"></a>  name: "seed2"
<a name="line-25743"></a>  type: "int"
<a name="line-25744"></a>}
<a name="line-25745"></a>input_arg {
<a name="line-25746"></a>  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
<a name="line-25747"></a>  name: "true_classes"
<a name="line-25748"></a>  type: DT_INT64
<a name="line-25749"></a>}
<a name="line-25750"></a>output_arg {
<a name="line-25751"></a>  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
<a name="line-25752"></a>  name: "sampled_candidates"
<a name="line-25753"></a>  type: DT_INT64
<a name="line-25754"></a>}
<a name="line-25755"></a>output_arg {
<a name="line-25756"></a>  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
<a name="line-25757"></a>  name: "true_expected_count"
<a name="line-25758"></a>  type: DT_FLOAT
<a name="line-25759"></a>}
<a name="line-25760"></a>output_arg {
<a name="line-25761"></a>  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
<a name="line-25762"></a>  name: "sampled_expected_count"
<a name="line-25763"></a>  type: DT_FLOAT
<a name="line-25764"></a>}
<a name="line-25765"></a>-}</span>
<a name="line-25766"></a>
<a name="line-25767"></a><span class='hs-comment'>-- | Table initializer that takes two tensors for keys and values respectively.</span>
<a name="line-25768"></a>
<a name="line-25769"></a><a name="initializeTable"></a><span class='hs-definition'>initializeTable</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tkey</span> <span class='hs-varid'>tval</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tkey</span><span class='hs-layout'>,</span>
<a name="line-25770"></a>                                             <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tval</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25771"></a>                   <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __table_handle__: Handle to a table which will be initialized.</span>
<a name="line-25772"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tkey</span> <span class='hs-comment'>-- ^ __keys__: Keys of type Tkey.</span>
<a name="line-25773"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tval</span> <span class='hs-comment'>-- ^ __values__: Values of type Tval.</span>
<a name="line-25774"></a>                   <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-25775"></a><span class='hs-definition'>initializeTable</span> <span class='hs-varid'>table_handle</span> <span class='hs-varid'>keys</span> <span class='hs-varid'>values</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25776"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"InitializeTable"</span>
<a name="line-25777"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tkey"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tkey</span><span class='hs-layout'>)</span>
<a name="line-25778"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tval"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tval</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25779"></a>        <span class='hs-varid'>table_handle</span> <span class='hs-varid'>keys</span> <span class='hs-varid'>values</span>
<a name="line-25780"></a><span class='hs-comment'>{-
<a name="line-25781"></a>attr { name: "Tkey" type: "type" }
<a name="line-25782"></a>attr { name: "Tval" type: "type" }
<a name="line-25783"></a>input_arg {
<a name="line-25784"></a>  description: "Handle to a table which will be initialized."
<a name="line-25785"></a>  is_ref: true
<a name="line-25786"></a>  name: "table_handle"
<a name="line-25787"></a>  type: DT_STRING
<a name="line-25788"></a>}
<a name="line-25789"></a>input_arg {
<a name="line-25790"></a>  description: "Keys of type Tkey." name: "keys" type_attr: "Tkey"
<a name="line-25791"></a>}
<a name="line-25792"></a>input_arg {
<a name="line-25793"></a>  description: "Values of type Tval."
<a name="line-25794"></a>  name: "values"
<a name="line-25795"></a>  type_attr: "Tval"
<a name="line-25796"></a>}
<a name="line-25797"></a>-}</span>
<a name="line-25798"></a>
<a name="line-25799"></a><a name="merge"></a><span class='hs-comment'>-- | Forwards the value of an available tensor from `inputs` to `output`.</span>
<a name="line-25800"></a><span class='hs-comment'>--</span>
<a name="line-25801"></a><span class='hs-comment'>-- `Merge` waits for at least one of the tensors in `inputs` to become available.</span>
<a name="line-25802"></a><span class='hs-comment'>-- It is usually combined with `Switch` to implement branching.</span>
<a name="line-25803"></a><span class='hs-comment'>-- </span>
<a name="line-25804"></a><span class='hs-comment'>-- `Merge` forwards the first tensor for become available to `output`, and sets</span>
<a name="line-25805"></a><span class='hs-comment'>-- `value_index` to its index in `inputs`.</span>
<a name="line-25806"></a><span class='hs-definition'>merge</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25807"></a>         <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __inputs__: The input tensors, exactly one of which will become available.</span>
<a name="line-25808"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>)</span>
<a name="line-25809"></a>         <span class='hs-comment'>-- ^ (__output__, __value_index__)</span>
<a name="line-25810"></a>         <span class='hs-comment'>--</span>
<a name="line-25811"></a>         <span class='hs-comment'>-- * __output__: Will be set to the available input tensor.</span>
<a name="line-25812"></a>         <span class='hs-comment'>--</span>
<a name="line-25813"></a>         <span class='hs-comment'>-- * __value_index__: The index of the chosen input tensor in `inputs`.</span>
<a name="line-25814"></a><span class='hs-definition'>merge</span> <span class='hs-varid'>inputs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"inputs"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25815"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Merge"</span>
<a name="line-25816"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-25817"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-25818"></a>        <span class='hs-varid'>inputs</span>
<a name="line-25819"></a>  <span class='hs-keyword'>where</span>
<a name="line-25820"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-25821"></a><span class='hs-comment'>{-
<a name="line-25822"></a>attr { name: "T" type: "type" }
<a name="line-25823"></a>attr { has_minimum: true minimum: 1 name: "N" type: "int" }
<a name="line-25824"></a>input_arg {
<a name="line-25825"></a>  description: "The input tensors, exactly one of which will become available."
<a name="line-25826"></a>  name: "inputs"
<a name="line-25827"></a>  number_attr: "N"
<a name="line-25828"></a>  type_attr: "T"
<a name="line-25829"></a>}
<a name="line-25830"></a>output_arg {
<a name="line-25831"></a>  description: "Will be set to the available input tensor."
<a name="line-25832"></a>  name: "output"
<a name="line-25833"></a>  type_attr: "T"
<a name="line-25834"></a>}
<a name="line-25835"></a>output_arg {
<a name="line-25836"></a>  description: "The index of the chosen input tensor in `inputs`."
<a name="line-25837"></a>  name: "value_index"
<a name="line-25838"></a>  type: DT_INT32
<a name="line-25839"></a>}
<a name="line-25840"></a>-}</span>
<a name="line-25841"></a>
<a name="line-25842"></a><a name="refMerge"></a><span class='hs-comment'>-- | Forwards the value of an available tensor from `inputs` to `output`.</span>
<a name="line-25843"></a><span class='hs-comment'>--</span>
<a name="line-25844"></a><span class='hs-comment'>-- `Merge` waits for at least one of the tensors in `inputs` to become available.</span>
<a name="line-25845"></a><span class='hs-comment'>-- It is usually combined with `Switch` to implement branching.</span>
<a name="line-25846"></a><span class='hs-comment'>-- </span>
<a name="line-25847"></a><span class='hs-comment'>-- `Merge` forwards the first tensor for become available to `output`, and sets</span>
<a name="line-25848"></a><span class='hs-comment'>-- `value_index` to its index in `inputs`.</span>
<a name="line-25849"></a><span class='hs-definition'>refMerge</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25850"></a>            <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __inputs__: The input tensors, exactly one of which will become available.</span>
<a name="line-25851"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25852"></a>            <span class='hs-comment'>-- ^ (__output__, __value_index__)</span>
<a name="line-25853"></a>            <span class='hs-comment'>--</span>
<a name="line-25854"></a>            <span class='hs-comment'>-- * __output__: Will be set to the available input tensor.</span>
<a name="line-25855"></a>            <span class='hs-comment'>--</span>
<a name="line-25856"></a>            <span class='hs-comment'>-- * __value_index__: The index of the chosen input tensor in `inputs`.</span>
<a name="line-25857"></a><span class='hs-definition'>refMerge</span> <span class='hs-varid'>inputs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"inputs"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25858"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RefMerge"</span>
<a name="line-25859"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-25860"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-25861"></a>        <span class='hs-varid'>inputs</span>
<a name="line-25862"></a>  <span class='hs-keyword'>where</span>
<a name="line-25863"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-25864"></a><span class='hs-comment'>{-
<a name="line-25865"></a>attr { name: "T" type: "type" }
<a name="line-25866"></a>attr { has_minimum: true minimum: 1 name: "N" type: "int" }
<a name="line-25867"></a>input_arg {
<a name="line-25868"></a>  description: "The input tensors, exactly one of which will become available."
<a name="line-25869"></a>  is_ref: true
<a name="line-25870"></a>  name: "inputs"
<a name="line-25871"></a>  number_attr: "N"
<a name="line-25872"></a>  type_attr: "T"
<a name="line-25873"></a>}
<a name="line-25874"></a>output_arg {
<a name="line-25875"></a>  description: "Will be set to the available input tensor."
<a name="line-25876"></a>  is_ref: true
<a name="line-25877"></a>  name: "output"
<a name="line-25878"></a>  type_attr: "T"
<a name="line-25879"></a>}
<a name="line-25880"></a>output_arg {
<a name="line-25881"></a>  description: "The index of the chosen input tensor in `inputs`."
<a name="line-25882"></a>  name: "value_index"
<a name="line-25883"></a>  type: DT_INT32
<a name="line-25884"></a>}
<a name="line-25885"></a>-}</span>
<a name="line-25886"></a>
<a name="line-25887"></a><a name="round"></a><span class='hs-comment'>-- | Rounds the values of a tensor to the nearest integer, element-wise.</span>
<a name="line-25888"></a><span class='hs-comment'>--</span>
<a name="line-25889"></a><span class='hs-comment'>-- Rounds half to even.  Also known as bankers rounding. If you want to round</span>
<a name="line-25890"></a><span class='hs-comment'>-- according to the current system rounding mode use std::cint.</span>
<a name="line-25891"></a><span class='hs-definition'>round</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-25892"></a>                                              <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-25893"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-25894"></a>                                              <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-25895"></a>                                              <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25896"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-25897"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-25898"></a><span class='hs-definition'>round</span> <span class='hs-varid'>x</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25899"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Round"</span>
<a name="line-25900"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25901"></a>        <span class='hs-varid'>x</span>
<a name="line-25902"></a><span class='hs-comment'>{-
<a name="line-25903"></a>attr {
<a name="line-25904"></a>  allowed_values {
<a name="line-25905"></a>    list {
<a name="line-25906"></a>      type: DT_HALF
<a name="line-25907"></a>      type: DT_FLOAT
<a name="line-25908"></a>      type: DT_DOUBLE
<a name="line-25909"></a>      type: DT_INT32
<a name="line-25910"></a>      type: DT_INT64
<a name="line-25911"></a>      type: DT_COMPLEX64
<a name="line-25912"></a>      type: DT_COMPLEX128
<a name="line-25913"></a>    }
<a name="line-25914"></a>  }
<a name="line-25915"></a>  name: "T"
<a name="line-25916"></a>  type: "type"
<a name="line-25917"></a>}
<a name="line-25918"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-25919"></a>output_arg { name: "y" type_attr: "T" }
<a name="line-25920"></a>-}</span>
<a name="line-25921"></a>
<a name="line-25922"></a><span class='hs-comment'>-- | </span>
<a name="line-25923"></a>
<a name="line-25924"></a><a name="batchSelfAdjointEig"></a><span class='hs-definition'>batchSelfAdjointEig</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25925"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__</span>
<a name="line-25926"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-25927"></a><span class='hs-definition'>batchSelfAdjointEig</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25928"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchSelfAdjointEig"</span>
<a name="line-25929"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-25930"></a>        <span class='hs-varid'>input</span>
<a name="line-25931"></a><span class='hs-comment'>{-
<a name="line-25932"></a>attr {
<a name="line-25933"></a>  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
<a name="line-25934"></a>  name: "T"
<a name="line-25935"></a>  type: "type"
<a name="line-25936"></a>}
<a name="line-25937"></a>input_arg { name: "input" type_attr: "T" }
<a name="line-25938"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-25939"></a>-}</span>
<a name="line-25940"></a>
<a name="line-25941"></a><a name="dynamicPartition"></a><span class='hs-comment'>-- | Partitions `data` into `num_partitions` tensors using indices from `partitions`.</span>
<a name="line-25942"></a><span class='hs-comment'>--</span>
<a name="line-25943"></a><span class='hs-comment'>-- For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`</span>
<a name="line-25944"></a><span class='hs-comment'>-- becomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`</span>
<a name="line-25945"></a><span class='hs-comment'>-- are placed in `outputs[i]` in lexicographic order of `js`, and the first</span>
<a name="line-25946"></a><span class='hs-comment'>-- dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.</span>
<a name="line-25947"></a><span class='hs-comment'>-- In detail,</span>
<a name="line-25948"></a><span class='hs-comment'>-- </span>
<a name="line-25949"></a><span class='hs-comment'>-- ```python</span>
<a name="line-25950"></a><span class='hs-comment'>--     outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]</span>
<a name="line-25951"></a><span class='hs-comment'>-- </span>
<a name="line-25952"></a><span class='hs-comment'>--     outputs[i] = pack([data[js, ...] for js if partitions[js] == i])</span>
<a name="line-25953"></a><span class='hs-comment'>-- ```</span>
<a name="line-25954"></a><span class='hs-comment'>-- </span>
<a name="line-25955"></a><span class='hs-comment'>-- `data.shape` must start with `partitions.shape`.</span>
<a name="line-25956"></a><span class='hs-comment'>-- </span>
<a name="line-25957"></a><span class='hs-comment'>-- For example:</span>
<a name="line-25958"></a><span class='hs-comment'>-- </span>
<a name="line-25959"></a><span class='hs-comment'>-- ```python</span>
<a name="line-25960"></a><span class='hs-comment'>--     # Scalar partitions.</span>
<a name="line-25961"></a><span class='hs-comment'>--     partitions = 1</span>
<a name="line-25962"></a><span class='hs-comment'>--     num_partitions = 2</span>
<a name="line-25963"></a><span class='hs-comment'>--     data = [10, 20]</span>
<a name="line-25964"></a><span class='hs-comment'>--     outputs[0] = []  # Empty with shape [0, 2]</span>
<a name="line-25965"></a><span class='hs-comment'>--     outputs[1] = [[10, 20]]</span>
<a name="line-25966"></a><span class='hs-comment'>-- </span>
<a name="line-25967"></a><span class='hs-comment'>--     # Vector partitions.</span>
<a name="line-25968"></a><span class='hs-comment'>--     partitions = [0, 0, 1, 1, 0]</span>
<a name="line-25969"></a><span class='hs-comment'>--     num_partitions = 2</span>
<a name="line-25970"></a><span class='hs-comment'>--     data = [10, 20, 30, 40, 50]</span>
<a name="line-25971"></a><span class='hs-comment'>--     outputs[0] = [10, 20, 50]</span>
<a name="line-25972"></a><span class='hs-comment'>--     outputs[1] = [30, 40]</span>
<a name="line-25973"></a><span class='hs-comment'>-- ```</span>
<a name="line-25974"></a><span class='hs-comment'>-- </span>
<a name="line-25975"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-25976"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/DynamicPartition.png" alt&gt;</span>
<a name="line-25977"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-25978"></a><span class='hs-definition'>dynamicPartition</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-25979"></a>                    <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_partitions__: The number of partitions to output.</span>
<a name="line-25980"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__</span>
<a name="line-25981"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __partitions__: Any shape.  Indices in the range `[0, num_partitions)`.</span>
<a name="line-25982"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __outputs__</span>
<a name="line-25983"></a><span class='hs-definition'>dynamicPartition</span> <span class='hs-varid'>num_partitions</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>partitions</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-25984"></a>    <span class='hs-varid'>buildListOp</span> <span class='hs-keyglyph'>[</span><span class='hs-varid'>num_partitions</span><span class='hs-keyglyph'>]</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DynamicPartition"</span>
<a name="line-25985"></a>                                  <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-25986"></a>                                  <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_partitions"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_partitions</span><span class='hs-layout'>)</span>
<a name="line-25987"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>partitions</span>
<a name="line-25988"></a><span class='hs-comment'>{-
<a name="line-25989"></a>attr {
<a name="line-25990"></a>  description: "The number of partitions to output."
<a name="line-25991"></a>  has_minimum: true
<a name="line-25992"></a>  minimum: 1
<a name="line-25993"></a>  name: "num_partitions"
<a name="line-25994"></a>  type: "int"
<a name="line-25995"></a>}
<a name="line-25996"></a>attr { name: "T" type: "type" }
<a name="line-25997"></a>input_arg { name: "data" type_attr: "T" }
<a name="line-25998"></a>input_arg {
<a name="line-25999"></a>  description: "Any shape.  Indices in the range `[0, num_partitions)`."
<a name="line-26000"></a>  name: "partitions"
<a name="line-26001"></a>  type: DT_INT32
<a name="line-26002"></a>}
<a name="line-26003"></a>output_arg {
<a name="line-26004"></a>  name: "outputs" number_attr: "num_partitions" type_attr: "T"
<a name="line-26005"></a>}
<a name="line-26006"></a>-}</span>
<a name="line-26007"></a>
<a name="line-26008"></a><a name="reshape"></a><span class='hs-comment'>-- | Reshapes a tensor.</span>
<a name="line-26009"></a><span class='hs-comment'>--</span>
<a name="line-26010"></a><span class='hs-comment'>-- Given `tensor`, this operation returns a tensor that has the same values</span>
<a name="line-26011"></a><span class='hs-comment'>-- as `tensor` with shape `shape`.</span>
<a name="line-26012"></a><span class='hs-comment'>-- </span>
<a name="line-26013"></a><span class='hs-comment'>-- If one component of `shape` is the special value -1, the size of that dimension</span>
<a name="line-26014"></a><span class='hs-comment'>-- is computed so that the total size remains constant.  In particular, a `shape`</span>
<a name="line-26015"></a><span class='hs-comment'>-- of `[-1]` flattens into 1-D.  At most one component of `shape` can be -1.</span>
<a name="line-26016"></a><span class='hs-comment'>-- </span>
<a name="line-26017"></a><span class='hs-comment'>-- If `shape` is 1-D or higher, then the operation returns a tensor with shape</span>
<a name="line-26018"></a><span class='hs-comment'>-- `shape` filled with the values of `tensor`. In this case, the number of elements</span>
<a name="line-26019"></a><span class='hs-comment'>-- implied by `shape` must be the same as the number of elements in `tensor`.</span>
<a name="line-26020"></a><span class='hs-comment'>-- </span>
<a name="line-26021"></a><span class='hs-comment'>-- For example:</span>
<a name="line-26022"></a><span class='hs-comment'>-- </span>
<a name="line-26023"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-26024"></a><span class='hs-comment'>-- # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]</span>
<a name="line-26025"></a><span class='hs-comment'>-- # tensor 't' has shape [9]</span>
<a name="line-26026"></a><span class='hs-comment'>-- reshape(t, [3, 3]) ==&gt; [[1, 2, 3],</span>
<a name="line-26027"></a><span class='hs-comment'>--                         [4, 5, 6],</span>
<a name="line-26028"></a><span class='hs-comment'>--                         [7, 8, 9]]</span>
<a name="line-26029"></a><span class='hs-comment'>-- </span>
<a name="line-26030"></a><span class='hs-comment'>-- # tensor 't' is [[[1, 1], [2, 2]],</span>
<a name="line-26031"></a><span class='hs-comment'>-- #                [[3, 3], [4, 4]]]</span>
<a name="line-26032"></a><span class='hs-comment'>-- # tensor 't' has shape [2, 2, 2]</span>
<a name="line-26033"></a><span class='hs-comment'>-- reshape(t, [2, 4]) ==&gt; [[1, 1, 2, 2],</span>
<a name="line-26034"></a><span class='hs-comment'>--                         [3, 3, 4, 4]]</span>
<a name="line-26035"></a><span class='hs-comment'>-- </span>
<a name="line-26036"></a><span class='hs-comment'>-- # tensor 't' is [[[1, 1, 1],</span>
<a name="line-26037"></a><span class='hs-comment'>-- #                 [2, 2, 2]],</span>
<a name="line-26038"></a><span class='hs-comment'>-- #                [[3, 3, 3],</span>
<a name="line-26039"></a><span class='hs-comment'>-- #                 [4, 4, 4]],</span>
<a name="line-26040"></a><span class='hs-comment'>-- #                [[5, 5, 5],</span>
<a name="line-26041"></a><span class='hs-comment'>-- #                 [6, 6, 6]]]</span>
<a name="line-26042"></a><span class='hs-comment'>-- # tensor 't' has shape [3, 2, 3]</span>
<a name="line-26043"></a><span class='hs-comment'>-- # pass '[-1]' to flatten 't'</span>
<a name="line-26044"></a><span class='hs-comment'>-- reshape(t, [-1]) ==&gt; [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]</span>
<a name="line-26045"></a><span class='hs-comment'>-- </span>
<a name="line-26046"></a><span class='hs-comment'>-- # -1 can also be used to infer the shape</span>
<a name="line-26047"></a><span class='hs-comment'>-- </span>
<a name="line-26048"></a><span class='hs-comment'>-- # -1 is inferred to be 9:</span>
<a name="line-26049"></a><span class='hs-comment'>-- reshape(t, [2, -1]) ==&gt; [[1, 1, 1, 2, 2, 2, 3, 3, 3],</span>
<a name="line-26050"></a><span class='hs-comment'>--                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]</span>
<a name="line-26051"></a><span class='hs-comment'>-- # -1 is inferred to be 2:</span>
<a name="line-26052"></a><span class='hs-comment'>-- reshape(t, [-1, 9]) ==&gt; [[1, 1, 1, 2, 2, 2, 3, 3, 3],</span>
<a name="line-26053"></a><span class='hs-comment'>--                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]</span>
<a name="line-26054"></a><span class='hs-comment'>-- # -1 is inferred to be 3:</span>
<a name="line-26055"></a><span class='hs-comment'>-- reshape(t, [ 2, -1, 3]) ==&gt; [[[1, 1, 1],</span>
<a name="line-26056"></a><span class='hs-comment'>--                               [2, 2, 2],</span>
<a name="line-26057"></a><span class='hs-comment'>--                               [3, 3, 3]],</span>
<a name="line-26058"></a><span class='hs-comment'>--                              [[4, 4, 4],</span>
<a name="line-26059"></a><span class='hs-comment'>--                               [5, 5, 5],</span>
<a name="line-26060"></a><span class='hs-comment'>--                               [6, 6, 6]]]</span>
<a name="line-26061"></a><span class='hs-comment'>-- </span>
<a name="line-26062"></a><span class='hs-comment'>-- # tensor 't' is [7]</span>
<a name="line-26063"></a><span class='hs-comment'>-- # shape `[]` reshapes to a scalar</span>
<a name="line-26064"></a><span class='hs-comment'>-- reshape(t, []) ==&gt; 7</span>
<a name="line-26065"></a><span class='hs-comment'>-- ```</span>
<a name="line-26066"></a><span class='hs-definition'>reshape</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tshape</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tshape</span><span class='hs-layout'>,</span>
<a name="line-26067"></a>                                    <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-26068"></a>                                            <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tshape</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26069"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __tensor__</span>
<a name="line-26070"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tshape</span> <span class='hs-comment'>-- ^ __shape__: Defines the shape of the output tensor.</span>
<a name="line-26071"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__</span>
<a name="line-26072"></a><span class='hs-definition'>reshape</span> <span class='hs-varid'>tensor</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26073"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Reshape"</span>
<a name="line-26074"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-26075"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tshape"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tshape</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26076"></a>        <span class='hs-varid'>tensor</span> <span class='hs-varid'>shape</span>
<a name="line-26077"></a><span class='hs-comment'>{-
<a name="line-26078"></a>attr { name: "T" type: "type" }
<a name="line-26079"></a>attr {
<a name="line-26080"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-26081"></a>  default_value { type: DT_INT32 }
<a name="line-26082"></a>  name: "Tshape"
<a name="line-26083"></a>  type: "type"
<a name="line-26084"></a>}
<a name="line-26085"></a>input_arg { name: "tensor" type_attr: "T" }
<a name="line-26086"></a>input_arg {
<a name="line-26087"></a>  description: "Defines the shape of the output tensor."
<a name="line-26088"></a>  name: "shape"
<a name="line-26089"></a>  type_attr: "Tshape"
<a name="line-26090"></a>}
<a name="line-26091"></a>output_arg { name: "output" type_attr: "T" }
<a name="line-26092"></a>-}</span>
<a name="line-26093"></a>
<a name="line-26094"></a><span class='hs-comment'>-- | A Reader that outputs fixed-length records from a file.</span>
<a name="line-26095"></a>
<a name="line-26096"></a><a name="fixedLengthRecordReader"></a><span class='hs-definition'>fixedLengthRecordReader</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __record_bytes__</span>
<a name="line-26097"></a>                           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __reader_handle__: The handle to reference the Reader.</span>
<a name="line-26098"></a><span class='hs-definition'>fixedLengthRecordReader</span> <span class='hs-varid'>record_bytes</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26099"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"FixedLengthRecordReader"</span>
<a name="line-26100"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"record_bytes"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>record_bytes</span><span class='hs-layout'>)</span>
<a name="line-26101"></a>        
<a name="line-26102"></a><span class='hs-comment'>{-
<a name="line-26103"></a>attr { default_value { i: 0 } name: "header_bytes" type: "int" }
<a name="line-26104"></a>attr { name: "record_bytes" type: "int" }
<a name="line-26105"></a>attr { default_value { i: 0 } name: "footer_bytes" type: "int" }
<a name="line-26106"></a>attr {
<a name="line-26107"></a>  default_value { s: "" }
<a name="line-26108"></a>  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
<a name="line-26109"></a>  name: "container"
<a name="line-26110"></a>  type: "string"
<a name="line-26111"></a>}
<a name="line-26112"></a>attr {
<a name="line-26113"></a>  default_value { s: "" }
<a name="line-26114"></a>  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
<a name="line-26115"></a>  name: "shared_name"
<a name="line-26116"></a>  type: "string"
<a name="line-26117"></a>}
<a name="line-26118"></a>output_arg {
<a name="line-26119"></a>  description: "The handle to reference the Reader."
<a name="line-26120"></a>  is_ref: true
<a name="line-26121"></a>  name: "reader_handle"
<a name="line-26122"></a>  type: DT_STRING
<a name="line-26123"></a>}
<a name="line-26124"></a>-}</span>
<a name="line-26125"></a>
<a name="line-26126"></a><a name="sdcaOptimizer"></a><span class='hs-comment'>-- | Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for</span>
<a name="line-26127"></a><span class='hs-comment'>--</span>
<a name="line-26128"></a><span class='hs-comment'>-- linear models with L1 + L2 regularization. As global optimization objective is</span>
<a name="line-26129"></a><span class='hs-comment'>-- strongly-convex, the optimizer optimizes the dual objective at each step. The</span>
<a name="line-26130"></a><span class='hs-comment'>-- optimizer applies each update one example at a time. Examples are sampled</span>
<a name="line-26131"></a><span class='hs-comment'>-- uniformly, and the optimizer is learning rate free and enjoys linear convergence</span>
<a name="line-26132"></a><span class='hs-comment'>-- rate.</span>
<a name="line-26133"></a><span class='hs-comment'>-- </span>
<a name="line-26134"></a><span class='hs-comment'>-- Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai; Zhang, Tong.</span>
<a name="line-26135"></a><span class='hs-comment'>-- 2012 arXiv1211.2717S: <a href="http://arxiv.org/pdf/1211.2717v1.pdf">http://arxiv.org/pdf/1211.2717v1.pdf</a></span>
<a name="line-26136"></a><span class='hs-comment'>-- </span>
<a name="line-26137"></a><span class='hs-comment'>--   Loss objective = \sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|</span>
<a name="line-26138"></a><span class='hs-comment'>-- </span>
<a name="line-26139"></a><span class='hs-comment'>-- Adding vs. Averaging in Distributed Primal-Dual Optimization.</span>
<a name="line-26140"></a><span class='hs-comment'>-- Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik,</span>
<a name="line-26141"></a><span class='hs-comment'>-- Martin Takac <a href="http://arxiv.org/abs/1502.03508">http://arxiv.org/abs/1502.03508</a></span>
<a name="line-26142"></a><span class='hs-comment'>-- </span>
<a name="line-26143"></a><span class='hs-comment'>-- Stochastic Dual Coordinate Ascent with Adaptive Probabilities</span>
<a name="line-26144"></a><span class='hs-comment'>-- Dominik Csiba, Zheng Qu, Peter Richtarik https://arxiv.org/abs/1502.08053</span>
<a name="line-26145"></a><span class='hs-definition'>sdcaOptimizer</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __l1__: Symmetric l1 regularization strength.</span>
<a name="line-26146"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __l2__: Symmetric l2 regularization strength.</span>
<a name="line-26147"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_inner_iterations__: Number of iterations per mini-batch.</span>
<a name="line-26148"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __num_loss_partitions__: Number of partitions of the global loss function.</span>
<a name="line-26149"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __sparse_example_indices__: a list of vectors which contain example indices.</span>
<a name="line-26150"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __sparse_feature_indices__: a list of vectors which contain feature indices.</span>
<a name="line-26151"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __sparse_feature_values__: a list of vectors which contains feature value</span>
<a name="line-26152"></a>                                      <span class='hs-comment'>-- associated with each feature group.</span>
<a name="line-26153"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __dense_features__: a list of matrices which contains the dense feature values.</span>
<a name="line-26154"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __example_weights__: a vector which contains the weight associated with each</span>
<a name="line-26155"></a>                                    <span class='hs-comment'>-- example.</span>
<a name="line-26156"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __example_labels__: a vector which contains the label/target associated with each</span>
<a name="line-26157"></a>                                    <span class='hs-comment'>-- example.</span>
<a name="line-26158"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v7</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __sparse_indices__: a list of vectors where each value is the indices which has</span>
<a name="line-26159"></a>                                               <span class='hs-comment'>-- corresponding weights in sparse_weights. This field maybe ommitted for the</span>
<a name="line-26160"></a>                                               <span class='hs-comment'>-- dense approach.</span>
<a name="line-26161"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v8</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __sparse_weights__: a list of vectors where each value is the weight associated with</span>
<a name="line-26162"></a>                                      <span class='hs-comment'>-- a sparse feature group.</span>
<a name="line-26163"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-varid'>v9</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __dense_weights__: a list of vectors where the values are the weights associated</span>
<a name="line-26164"></a>                                      <span class='hs-comment'>-- with a dense feature group.</span>
<a name="line-26165"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v10</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __example_state_data__: a list of vectors containing the example state data.</span>
<a name="line-26166"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>,</span>
<a name="line-26167"></a>                     <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span>
<a name="line-26168"></a>                 <span class='hs-comment'>-- ^ (__out_example_state_data__, __out_delta_sparse_weights__, __out_delta_dense_weights__)</span>
<a name="line-26169"></a>                 <span class='hs-comment'>--</span>
<a name="line-26170"></a>                 <span class='hs-comment'>-- * __out_example_state_data__: a list of vectors containing the updated example state</span>
<a name="line-26171"></a>                 <span class='hs-comment'>-- data.</span>
<a name="line-26172"></a>                 <span class='hs-comment'>--</span>
<a name="line-26173"></a>                 <span class='hs-comment'>-- * __out_delta_sparse_weights__: a list of vectors where each value is the delta</span>
<a name="line-26174"></a>                 <span class='hs-comment'>-- weights associated with a sparse feature group.</span>
<a name="line-26175"></a>                 <span class='hs-comment'>--</span>
<a name="line-26176"></a>                 <span class='hs-comment'>-- * __out_delta_dense_weights__: a list of vectors where the values are the delta</span>
<a name="line-26177"></a>                 <span class='hs-comment'>-- weights associated with a dense feature group.</span>
<a name="line-26178"></a><span class='hs-definition'>sdcaOptimizer</span> <span class='hs-varid'>l1</span> <span class='hs-varid'>l2</span> <span class='hs-varid'>num_inner_iterations</span> <span class='hs-varid'>num_loss_partitions</span>
<a name="line-26179"></a>              <span class='hs-varid'>sparse_example_indices</span> <span class='hs-varid'>sparse_feature_indices</span>
<a name="line-26180"></a>              <span class='hs-varid'>sparse_feature_values</span> <span class='hs-varid'>dense_features</span> <span class='hs-varid'>example_weights</span>
<a name="line-26181"></a>              <span class='hs-varid'>example_labels</span> <span class='hs-varid'>sparse_indices</span> <span class='hs-varid'>sparse_weights</span> <span class='hs-varid'>dense_weights</span>
<a name="line-26182"></a>              <span class='hs-varid'>example_state_data</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"num_sparse_features"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"sparse_example_indices"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>sparse_example_indices</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-26183"></a>                                                                           <span class='hs-layout'>(</span><span class='hs-str'>"sparse_feature_indices"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>sparse_feature_indices</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-26184"></a>                                                                           <span class='hs-layout'>(</span><span class='hs-str'>"sparse_indices"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>sparse_indices</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-26185"></a>                                                                           <span class='hs-layout'>(</span><span class='hs-str'>"sparse_weights"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>sparse_weights</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-26186"></a>                                                  <span class='hs-layout'>(</span><span class='hs-str'>"num_sparse_features_with_values"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"sparse_feature_values"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>sparse_feature_values</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-26187"></a>                                                  <span class='hs-layout'>(</span><span class='hs-str'>"num_dense_features"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"dense_features"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>dense_features</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-26188"></a>                                                                          <span class='hs-layout'>(</span><span class='hs-str'>"dense_weights"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>dense_weights</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26189"></a>    <span class='hs-varid'>buildListOp</span> <span class='hs-keyglyph'>[</span><span class='hs-varid'>num_sparse_features</span><span class='hs-layout'>,</span> <span class='hs-varid'>num_dense_features</span><span class='hs-keyglyph'>]</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SdcaOptimizer"</span>
<a name="line-26190"></a>                                                           <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"l1"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>l1</span>
<a name="line-26191"></a>                                                           <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"l2"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>l2</span>
<a name="line-26192"></a>                                                           <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_inner_iterations"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_inner_iterations</span>
<a name="line-26193"></a>                                                           <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_loss_partitions"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_loss_partitions</span>
<a name="line-26194"></a>                                                           <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_sparse_features"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_sparse_features</span>
<a name="line-26195"></a>                                                           <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_sparse_features_with_values"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_sparse_features_with_values</span>
<a name="line-26196"></a>                                                           <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"num_dense_features"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>num_dense_features</span><span class='hs-layout'>)</span>
<a name="line-26197"></a>        <span class='hs-varid'>sparse_example_indices</span> <span class='hs-varid'>sparse_feature_indices</span> <span class='hs-varid'>sparse_feature_values</span>
<a name="line-26198"></a>        <span class='hs-varid'>dense_features</span> <span class='hs-varid'>example_weights</span> <span class='hs-varid'>example_labels</span> <span class='hs-varid'>sparse_indices</span>
<a name="line-26199"></a>        <span class='hs-varid'>sparse_weights</span> <span class='hs-varid'>dense_weights</span> <span class='hs-varid'>example_state_data</span>
<a name="line-26200"></a>  <span class='hs-keyword'>where</span>
<a name="line-26201"></a>    <span class='hs-varid'>num_sparse_features</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>sparse_example_indices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-26202"></a>    <span class='hs-varid'>num_sparse_features_with_values</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>sparse_feature_values</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-26203"></a>    <span class='hs-varid'>num_dense_features</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>dense_features</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-26204"></a><span class='hs-comment'>{-
<a name="line-26205"></a>attr {
<a name="line-26206"></a>  allowed_values {
<a name="line-26207"></a>    list {
<a name="line-26208"></a>      s: "logistic_loss"
<a name="line-26209"></a>      s: "squared_loss"
<a name="line-26210"></a>      s: "hinge_loss"
<a name="line-26211"></a>      s: "smooth_hinge_loss"
<a name="line-26212"></a>    }
<a name="line-26213"></a>  }
<a name="line-26214"></a>  description: "Type of the primal loss. Currently SdcaSolver supports logistic,\nsquared and hinge losses."
<a name="line-26215"></a>  name: "loss_type"
<a name="line-26216"></a>  type: "string"
<a name="line-26217"></a>}
<a name="line-26218"></a>attr {
<a name="line-26219"></a>  default_value { b: false }
<a name="line-26220"></a>  description: "Whether to use Adapative SDCA for the inner loop."
<a name="line-26221"></a>  name: "adaptative"
<a name="line-26222"></a>  type: "bool"
<a name="line-26223"></a>}
<a name="line-26224"></a>attr {
<a name="line-26225"></a>  description: "Number of sparse feature groups to train on."
<a name="line-26226"></a>  has_minimum: true
<a name="line-26227"></a>  name: "num_sparse_features"
<a name="line-26228"></a>  type: "int"
<a name="line-26229"></a>}
<a name="line-26230"></a>attr {
<a name="line-26231"></a>  description: "Number of sparse feature groups with values\nassociated with it, otherwise implicitly treats values as 1.0."
<a name="line-26232"></a>  has_minimum: true
<a name="line-26233"></a>  name: "num_sparse_features_with_values"
<a name="line-26234"></a>  type: "int"
<a name="line-26235"></a>}
<a name="line-26236"></a>attr {
<a name="line-26237"></a>  description: "Number of dense feature groups to train on."
<a name="line-26238"></a>  has_minimum: true
<a name="line-26239"></a>  name: "num_dense_features"
<a name="line-26240"></a>  type: "int"
<a name="line-26241"></a>}
<a name="line-26242"></a>attr {
<a name="line-26243"></a>  description: "Symmetric l1 regularization strength."
<a name="line-26244"></a>  name: "l1"
<a name="line-26245"></a>  type: "float"
<a name="line-26246"></a>}
<a name="line-26247"></a>attr {
<a name="line-26248"></a>  description: "Symmetric l2 regularization strength."
<a name="line-26249"></a>  name: "l2"
<a name="line-26250"></a>  type: "float"
<a name="line-26251"></a>}
<a name="line-26252"></a>attr {
<a name="line-26253"></a>  description: "Number of partitions of the global loss function."
<a name="line-26254"></a>  has_minimum: true
<a name="line-26255"></a>  minimum: 1
<a name="line-26256"></a>  name: "num_loss_partitions"
<a name="line-26257"></a>  type: "int"
<a name="line-26258"></a>}
<a name="line-26259"></a>attr {
<a name="line-26260"></a>  description: "Number of iterations per mini-batch."
<a name="line-26261"></a>  has_minimum: true
<a name="line-26262"></a>  minimum: 1
<a name="line-26263"></a>  name: "num_inner_iterations"
<a name="line-26264"></a>  type: "int"
<a name="line-26265"></a>}
<a name="line-26266"></a>input_arg {
<a name="line-26267"></a>  description: "a list of vectors which contain example indices."
<a name="line-26268"></a>  name: "sparse_example_indices"
<a name="line-26269"></a>  number_attr: "num_sparse_features"
<a name="line-26270"></a>  type: DT_INT64
<a name="line-26271"></a>}
<a name="line-26272"></a>input_arg {
<a name="line-26273"></a>  description: "a list of vectors which contain feature indices."
<a name="line-26274"></a>  name: "sparse_feature_indices"
<a name="line-26275"></a>  number_attr: "num_sparse_features"
<a name="line-26276"></a>  type: DT_INT64
<a name="line-26277"></a>}
<a name="line-26278"></a>input_arg {
<a name="line-26279"></a>  description: "a list of vectors which contains feature value\nassociated with each feature group."
<a name="line-26280"></a>  name: "sparse_feature_values"
<a name="line-26281"></a>  number_attr: "num_sparse_features_with_values"
<a name="line-26282"></a>  type: DT_FLOAT
<a name="line-26283"></a>}
<a name="line-26284"></a>input_arg {
<a name="line-26285"></a>  description: "a list of matrices which contains the dense feature values."
<a name="line-26286"></a>  name: "dense_features"
<a name="line-26287"></a>  number_attr: "num_dense_features"
<a name="line-26288"></a>  type: DT_FLOAT
<a name="line-26289"></a>}
<a name="line-26290"></a>input_arg {
<a name="line-26291"></a>  description: "a vector which contains the weight associated with each\nexample."
<a name="line-26292"></a>  name: "example_weights"
<a name="line-26293"></a>  type: DT_FLOAT
<a name="line-26294"></a>}
<a name="line-26295"></a>input_arg {
<a name="line-26296"></a>  description: "a vector which contains the label/target associated with each\nexample."
<a name="line-26297"></a>  name: "example_labels"
<a name="line-26298"></a>  type: DT_FLOAT
<a name="line-26299"></a>}
<a name="line-26300"></a>input_arg {
<a name="line-26301"></a>  description: "a list of vectors where each value is the indices which has\ncorresponding weights in sparse_weights. This field maybe ommitted for the\ndense approach."
<a name="line-26302"></a>  name: "sparse_indices"
<a name="line-26303"></a>  number_attr: "num_sparse_features"
<a name="line-26304"></a>  type: DT_INT64
<a name="line-26305"></a>}
<a name="line-26306"></a>input_arg {
<a name="line-26307"></a>  description: "a list of vectors where each value is the weight associated with\na sparse feature group."
<a name="line-26308"></a>  name: "sparse_weights"
<a name="line-26309"></a>  number_attr: "num_sparse_features"
<a name="line-26310"></a>  type: DT_FLOAT
<a name="line-26311"></a>}
<a name="line-26312"></a>input_arg {
<a name="line-26313"></a>  description: "a list of vectors where the values are the weights associated\nwith a dense feature group."
<a name="line-26314"></a>  name: "dense_weights"
<a name="line-26315"></a>  number_attr: "num_dense_features"
<a name="line-26316"></a>  type: DT_FLOAT
<a name="line-26317"></a>}
<a name="line-26318"></a>input_arg {
<a name="line-26319"></a>  description: "a list of vectors containing the example state data."
<a name="line-26320"></a>  name: "example_state_data"
<a name="line-26321"></a>  type: DT_FLOAT
<a name="line-26322"></a>}
<a name="line-26323"></a>output_arg {
<a name="line-26324"></a>  description: "a list of vectors containing the updated example state\ndata."
<a name="line-26325"></a>  name: "out_example_state_data"
<a name="line-26326"></a>  type: DT_FLOAT
<a name="line-26327"></a>}
<a name="line-26328"></a>output_arg {
<a name="line-26329"></a>  description: "a list of vectors where each value is the delta\nweights associated with a sparse feature group."
<a name="line-26330"></a>  name: "out_delta_sparse_weights"
<a name="line-26331"></a>  number_attr: "num_sparse_features"
<a name="line-26332"></a>  type: DT_FLOAT
<a name="line-26333"></a>}
<a name="line-26334"></a>output_arg {
<a name="line-26335"></a>  description: "a list of vectors where the values are the delta\nweights associated with a dense feature group."
<a name="line-26336"></a>  name: "out_delta_dense_weights"
<a name="line-26337"></a>  number_attr: "num_dense_features"
<a name="line-26338"></a>  type: DT_FLOAT
<a name="line-26339"></a>}
<a name="line-26340"></a>-}</span>
<a name="line-26341"></a>
<a name="line-26342"></a><a name="resizeArea"></a><span class='hs-comment'>-- | Resize `images` to `size` using area interpolation.</span>
<a name="line-26343"></a><span class='hs-comment'>--</span>
<a name="line-26344"></a><span class='hs-comment'>-- Input images can be of different types but output images are always float.</span>
<a name="line-26345"></a><span class='hs-definition'>resizeArea</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-26346"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-26347"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-26348"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-26349"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-26350"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-26351"></a>                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26352"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __images__: 4-D with shape `[batch, height, width, channels]`.</span>
<a name="line-26353"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The</span>
<a name="line-26354"></a>                                          <span class='hs-comment'>-- new size for the images.</span>
<a name="line-26355"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __resized_images__: 4-D with shape</span>
<a name="line-26356"></a>              <span class='hs-comment'>-- `[batch, new_height, new_width, channels]`.</span>
<a name="line-26357"></a><span class='hs-definition'>resizeArea</span> <span class='hs-varid'>images</span> <span class='hs-varid'>size</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26358"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ResizeArea"</span>
<a name="line-26359"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26360"></a>        <span class='hs-varid'>images</span> <span class='hs-varid'>size</span>
<a name="line-26361"></a><span class='hs-comment'>{-
<a name="line-26362"></a>attr {
<a name="line-26363"></a>  allowed_values {
<a name="line-26364"></a>    list {
<a name="line-26365"></a>      type: DT_UINT8
<a name="line-26366"></a>      type: DT_INT8
<a name="line-26367"></a>      type: DT_INT16
<a name="line-26368"></a>      type: DT_INT32
<a name="line-26369"></a>      type: DT_INT64
<a name="line-26370"></a>      type: DT_HALF
<a name="line-26371"></a>      type: DT_FLOAT
<a name="line-26372"></a>      type: DT_DOUBLE
<a name="line-26373"></a>    }
<a name="line-26374"></a>  }
<a name="line-26375"></a>  name: "T"
<a name="line-26376"></a>  type: "type"
<a name="line-26377"></a>}
<a name="line-26378"></a>attr {
<a name="line-26379"></a>  default_value { b: false }
<a name="line-26380"></a>  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
<a name="line-26381"></a>  name: "align_corners"
<a name="line-26382"></a>  type: "bool"
<a name="line-26383"></a>}
<a name="line-26384"></a>input_arg {
<a name="line-26385"></a>  description: "4-D with shape `[batch, height, width, channels]`."
<a name="line-26386"></a>  name: "images"
<a name="line-26387"></a>  type_attr: "T"
<a name="line-26388"></a>}
<a name="line-26389"></a>input_arg {
<a name="line-26390"></a>  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
<a name="line-26391"></a>  name: "size"
<a name="line-26392"></a>  type: DT_INT32
<a name="line-26393"></a>}
<a name="line-26394"></a>output_arg {
<a name="line-26395"></a>  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
<a name="line-26396"></a>  name: "resized_images"
<a name="line-26397"></a>  type: DT_FLOAT
<a name="line-26398"></a>}
<a name="line-26399"></a>-}</span>
<a name="line-26400"></a>
<a name="line-26401"></a><a name="linSpace"></a><span class='hs-comment'>-- | Generates values in an interval.</span>
<a name="line-26402"></a><span class='hs-comment'>--</span>
<a name="line-26403"></a><span class='hs-comment'>-- A sequence of `num` evenly-spaced values are generated beginning at `start`.</span>
<a name="line-26404"></a><span class='hs-comment'>-- If `num &gt; 1`, the values in the sequence increase by `stop - start / num - 1`,</span>
<a name="line-26405"></a><span class='hs-comment'>-- so that the last one is exactly `stop`.</span>
<a name="line-26406"></a><span class='hs-comment'>-- </span>
<a name="line-26407"></a><span class='hs-comment'>-- For example:</span>
<a name="line-26408"></a><span class='hs-comment'>-- </span>
<a name="line-26409"></a><span class='hs-comment'>-- ```</span>
<a name="line-26410"></a><span class='hs-comment'>-- tf.linspace(10.0, 12.0, 3, name="linspace") =&gt; [ 10.0  11.0  12.0]</span>
<a name="line-26411"></a><span class='hs-comment'>-- ```</span>
<a name="line-26412"></a><span class='hs-definition'>linSpace</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-26413"></a>                                      <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-26414"></a>                                                               <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26415"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __start__: First entry in the range.</span>
<a name="line-26416"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __stop__: Last entry in the range.</span>
<a name="line-26417"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __num__: Number of values to generate.</span>
<a name="line-26418"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 1-D. The generated values.</span>
<a name="line-26419"></a><span class='hs-definition'>linSpace</span> <span class='hs-varid'>start</span> <span class='hs-varid'>stop</span> <span class='hs-varid'>num</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26420"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LinSpace"</span>
<a name="line-26421"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-26422"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26423"></a>        <span class='hs-varid'>start</span> <span class='hs-varid'>stop</span> <span class='hs-varid'>num</span>
<a name="line-26424"></a><span class='hs-comment'>{-
<a name="line-26425"></a>attr {
<a name="line-26426"></a>  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
<a name="line-26427"></a>  name: "T"
<a name="line-26428"></a>  type: "type"
<a name="line-26429"></a>}
<a name="line-26430"></a>attr {
<a name="line-26431"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-26432"></a>  default_value { type: DT_INT32 }
<a name="line-26433"></a>  name: "Tidx"
<a name="line-26434"></a>  type: "type"
<a name="line-26435"></a>}
<a name="line-26436"></a>input_arg {
<a name="line-26437"></a>  description: "First entry in the range."
<a name="line-26438"></a>  name: "start"
<a name="line-26439"></a>  type_attr: "T"
<a name="line-26440"></a>}
<a name="line-26441"></a>input_arg {
<a name="line-26442"></a>  description: "Last entry in the range." name: "stop" type_attr: "T"
<a name="line-26443"></a>}
<a name="line-26444"></a>input_arg {
<a name="line-26445"></a>  description: "Number of values to generate."
<a name="line-26446"></a>  name: "num"
<a name="line-26447"></a>  type_attr: "Tidx"
<a name="line-26448"></a>}
<a name="line-26449"></a>output_arg {
<a name="line-26450"></a>  description: "1-D. The generated values."
<a name="line-26451"></a>  name: "output"
<a name="line-26452"></a>  type_attr: "T"
<a name="line-26453"></a>}
<a name="line-26454"></a>-}</span>
<a name="line-26455"></a>
<a name="line-26456"></a><a name="cTCLoss"></a><span class='hs-comment'>-- | Calculates the CTC Loss (log probability) for each batch entry.  Also calculates</span>
<a name="line-26457"></a><span class='hs-comment'>--</span>
<a name="line-26458"></a><span class='hs-comment'>-- the gradient.  This class performs the softmax operation for you, so inputs</span>
<a name="line-26459"></a><span class='hs-comment'>-- should be e.g. linear projections of outputs by an LSTM.</span>
<a name="line-26460"></a><span class='hs-definition'>cTCLoss</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Float</span> <span class='hs-comment'>-- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.</span>
<a name="line-26461"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __labels_indices__: The indices of a `SparseTensor&lt;int32, 2&gt;`.</span>
<a name="line-26462"></a>                                       <span class='hs-comment'>-- `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for</span>
<a name="line-26463"></a>                                       <span class='hs-comment'>-- `(batch b, time t)`.</span>
<a name="line-26464"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __labels_values__: The values (labels) associated with the given batch and time.</span>
<a name="line-26465"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __sequence_length__: A vector containing sequence lengths (batch).</span>
<a name="line-26466"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span>
<a name="line-26467"></a>           <span class='hs-comment'>-- ^ (__loss__, __gradient__)</span>
<a name="line-26468"></a>           <span class='hs-comment'>--</span>
<a name="line-26469"></a>           <span class='hs-comment'>-- * __loss__: A vector (batch) containing log-probabilities.</span>
<a name="line-26470"></a>           <span class='hs-comment'>--</span>
<a name="line-26471"></a>           <span class='hs-comment'>-- * __gradient__: The gradient of `loss`.  3-D, shape:</span>
<a name="line-26472"></a>           <span class='hs-comment'>-- `(max_time x batch_size x num_classes)`.</span>
<a name="line-26473"></a><span class='hs-definition'>cTCLoss</span> <span class='hs-varid'>inputs</span> <span class='hs-varid'>labels_indices</span> <span class='hs-varid'>labels_values</span> <span class='hs-varid'>sequence_length</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26474"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"CTCLoss"</span><span class='hs-layout'>)</span>
<a name="line-26475"></a>        <span class='hs-varid'>inputs</span> <span class='hs-varid'>labels_indices</span> <span class='hs-varid'>labels_values</span> <span class='hs-varid'>sequence_length</span>
<a name="line-26476"></a><span class='hs-comment'>{-
<a name="line-26477"></a>attr {
<a name="line-26478"></a>  default_value { b: false }
<a name="line-26479"></a>  description: "Scalar, if true then repeated labels are\ncollapsed prior to the CTC calculation."
<a name="line-26480"></a>  name: "preprocess_collapse_repeated"
<a name="line-26481"></a>  type: "bool"
<a name="line-26482"></a>}
<a name="line-26483"></a>attr {
<a name="line-26484"></a>  default_value { b: true }
<a name="line-26485"></a>  description: "Scalar.  If set to false, *during* CTC calculation\nrepeated non-blank labels will not be merged and are interpreted as\nindividual labels.  This is a simplified version of CTC."
<a name="line-26486"></a>  name: "ctc_merge_repeated"
<a name="line-26487"></a>  type: "bool"
<a name="line-26488"></a>}
<a name="line-26489"></a>input_arg {
<a name="line-26490"></a>  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
<a name="line-26491"></a>  name: "inputs"
<a name="line-26492"></a>  type: DT_FLOAT
<a name="line-26493"></a>}
<a name="line-26494"></a>input_arg {
<a name="line-26495"></a>  description: "The indices of a `SparseTensor&lt;int32, 2&gt;`.\n`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for\n`(batch b, time t)`."
<a name="line-26496"></a>  name: "labels_indices"
<a name="line-26497"></a>  type: DT_INT64
<a name="line-26498"></a>}
<a name="line-26499"></a>input_arg {
<a name="line-26500"></a>  description: "The values (labels) associated with the given batch and time."
<a name="line-26501"></a>  name: "labels_values"
<a name="line-26502"></a>  type: DT_INT32
<a name="line-26503"></a>}
<a name="line-26504"></a>input_arg {
<a name="line-26505"></a>  description: "A vector containing sequence lengths (batch)."
<a name="line-26506"></a>  name: "sequence_length"
<a name="line-26507"></a>  type: DT_INT32
<a name="line-26508"></a>}
<a name="line-26509"></a>output_arg {
<a name="line-26510"></a>  description: "A vector (batch) containing log-probabilities."
<a name="line-26511"></a>  name: "loss"
<a name="line-26512"></a>  type: DT_FLOAT
<a name="line-26513"></a>}
<a name="line-26514"></a>output_arg {
<a name="line-26515"></a>  description: "The gradient of `loss`.  3-D, shape:\n`(max_time x batch_size x num_classes)`."
<a name="line-26516"></a>  name: "gradient"
<a name="line-26517"></a>  type: DT_FLOAT
<a name="line-26518"></a>}
<a name="line-26519"></a>-}</span>
<a name="line-26520"></a>
<a name="line-26521"></a><a name="matrixDiagPart"></a><span class='hs-comment'>-- | Returns the batched diagonal part of a batched tensor.</span>
<a name="line-26522"></a><span class='hs-comment'>--</span>
<a name="line-26523"></a><span class='hs-comment'>-- This operation returns a tensor with the `diagonal` part</span>
<a name="line-26524"></a><span class='hs-comment'>-- of the batched `input`. The `diagonal` part is computed as follows:</span>
<a name="line-26525"></a><span class='hs-comment'>-- </span>
<a name="line-26526"></a><span class='hs-comment'>-- Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a</span>
<a name="line-26527"></a><span class='hs-comment'>-- tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:</span>
<a name="line-26528"></a><span class='hs-comment'>-- </span>
<a name="line-26529"></a><span class='hs-comment'>-- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.</span>
<a name="line-26530"></a><span class='hs-comment'>-- </span>
<a name="line-26531"></a><span class='hs-comment'>-- The input must be at least a matrix.</span>
<a name="line-26532"></a><span class='hs-comment'>-- </span>
<a name="line-26533"></a><span class='hs-comment'>-- For example:</span>
<a name="line-26534"></a><span class='hs-comment'>-- </span>
<a name="line-26535"></a><span class='hs-comment'>-- ```prettyprint</span>
<a name="line-26536"></a><span class='hs-comment'>-- # 'input' is [[[1, 0, 0, 0]</span>
<a name="line-26537"></a><span class='hs-comment'>--                [0, 2, 0, 0]</span>
<a name="line-26538"></a><span class='hs-comment'>--                [0, 0, 3, 0]</span>
<a name="line-26539"></a><span class='hs-comment'>--                [0, 0, 0, 4]],</span>
<a name="line-26540"></a><span class='hs-comment'>--               [[5, 0, 0, 0]</span>
<a name="line-26541"></a><span class='hs-comment'>--                [0, 6, 0, 0]</span>
<a name="line-26542"></a><span class='hs-comment'>--                [0, 0, 7, 0]</span>
<a name="line-26543"></a><span class='hs-comment'>--                [0, 0, 0, 8]]]</span>
<a name="line-26544"></a><span class='hs-comment'>-- </span>
<a name="line-26545"></a><span class='hs-comment'>-- and input.shape = (2, 4, 4)</span>
<a name="line-26546"></a><span class='hs-comment'>-- </span>
<a name="line-26547"></a><span class='hs-comment'>-- tf.matrix_diag_part(input) ==&gt; [[1, 2, 3, 4], [5, 6, 7, 8]]</span>
<a name="line-26548"></a><span class='hs-comment'>-- </span>
<a name="line-26549"></a><span class='hs-comment'>-- which has shape (2, 4)</span>
<a name="line-26550"></a><span class='hs-comment'>-- ```</span>
<a name="line-26551"></a><span class='hs-definition'>matrixDiagPart</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26552"></a>                  <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: Rank `k` tensor where `k &gt;= 2`.</span>
<a name="line-26553"></a>                  <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __diagonal__: The extracted diagonal(s) having shape</span>
<a name="line-26554"></a>                  <span class='hs-comment'>-- `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.</span>
<a name="line-26555"></a><span class='hs-definition'>matrixDiagPart</span> <span class='hs-varid'>input</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26556"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"MatrixDiagPart"</span>
<a name="line-26557"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26558"></a>        <span class='hs-varid'>input</span>
<a name="line-26559"></a><span class='hs-comment'>{-
<a name="line-26560"></a>attr { name: "T" type: "type" }
<a name="line-26561"></a>input_arg {
<a name="line-26562"></a>  description: "Rank `k` tensor where `k &gt;= 2`."
<a name="line-26563"></a>  name: "input"
<a name="line-26564"></a>  type_attr: "T"
<a name="line-26565"></a>}
<a name="line-26566"></a>output_arg {
<a name="line-26567"></a>  description: "The extracted diagonal(s) having shape\n`diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`."
<a name="line-26568"></a>  name: "diagonal"
<a name="line-26569"></a>  type_attr: "T"
<a name="line-26570"></a>}
<a name="line-26571"></a>-}</span>
<a name="line-26572"></a>
<a name="line-26573"></a><a name="enter"></a><span class='hs-comment'>-- | Creates or finds a child frame, and makes `data` available to the child frame.</span>
<a name="line-26574"></a><span class='hs-comment'>--</span>
<a name="line-26575"></a><span class='hs-comment'>-- This op is used together with `Exit` to create loops in the graph.</span>
<a name="line-26576"></a><span class='hs-comment'>-- The unique `frame_name` is used by the `Executor` to identify frames. If</span>
<a name="line-26577"></a><span class='hs-comment'>-- `is_constant` is true, `output` is a constant in the child frame; otherwise</span>
<a name="line-26578"></a><span class='hs-comment'>-- it may be changed in the child frame. At most `parallel_iterations` iterations</span>
<a name="line-26579"></a><span class='hs-comment'>-- are run in parallel in the child frame.</span>
<a name="line-26580"></a><span class='hs-definition'>enter</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26581"></a>         <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__: The tensor to be made available to the child frame.</span>
<a name="line-26582"></a>         <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The same tensor as `data`.</span>
<a name="line-26583"></a><span class='hs-definition'>enter</span> <span class='hs-varid'>data'</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26584"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Enter"</span>
<a name="line-26585"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26586"></a>        <span class='hs-varid'>data'</span>
<a name="line-26587"></a><span class='hs-comment'>{-
<a name="line-26588"></a>attr { name: "T" type: "type" }
<a name="line-26589"></a>attr {
<a name="line-26590"></a>  description: "The name of the child frame."
<a name="line-26591"></a>  name: "frame_name"
<a name="line-26592"></a>  type: "string"
<a name="line-26593"></a>}
<a name="line-26594"></a>attr {
<a name="line-26595"></a>  default_value { b: false }
<a name="line-26596"></a>  description: "If true, the output is constant within the child frame."
<a name="line-26597"></a>  name: "is_constant"
<a name="line-26598"></a>  type: "bool"
<a name="line-26599"></a>}
<a name="line-26600"></a>attr {
<a name="line-26601"></a>  default_value { i: 10 }
<a name="line-26602"></a>  description: "The number of iterations allowed to run in parallel."
<a name="line-26603"></a>  name: "parallel_iterations"
<a name="line-26604"></a>  type: "int"
<a name="line-26605"></a>}
<a name="line-26606"></a>input_arg {
<a name="line-26607"></a>  description: "The tensor to be made available to the child frame."
<a name="line-26608"></a>  name: "data"
<a name="line-26609"></a>  type_attr: "T"
<a name="line-26610"></a>}
<a name="line-26611"></a>output_arg {
<a name="line-26612"></a>  description: "The same tensor as `data`."
<a name="line-26613"></a>  name: "output"
<a name="line-26614"></a>  type_attr: "T"
<a name="line-26615"></a>}
<a name="line-26616"></a>-}</span>
<a name="line-26617"></a>
<a name="line-26618"></a><a name="encodePng"></a><span class='hs-comment'>-- | PNG-encode an image.</span>
<a name="line-26619"></a><span class='hs-comment'>--</span>
<a name="line-26620"></a><span class='hs-comment'>-- `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`</span>
<a name="line-26621"></a><span class='hs-comment'>-- where `channels` is:</span>
<a name="line-26622"></a><span class='hs-comment'>-- </span>
<a name="line-26623"></a><span class='hs-comment'>-- *   1: for grayscale.</span>
<a name="line-26624"></a><span class='hs-comment'>-- *   2: for grayscale + alpha.</span>
<a name="line-26625"></a><span class='hs-comment'>-- *   3: for RGB.</span>
<a name="line-26626"></a><span class='hs-comment'>-- *   4: for RGBA.</span>
<a name="line-26627"></a><span class='hs-comment'>-- </span>
<a name="line-26628"></a><span class='hs-comment'>-- The ZLIB compression level, `compression`, can be -1 for the PNG-encoder</span>
<a name="line-26629"></a><span class='hs-comment'>-- default or a value from 0 to 9.  9 is the highest compression level, generating</span>
<a name="line-26630"></a><span class='hs-comment'>-- the smallest output, but is slower.</span>
<a name="line-26631"></a><span class='hs-definition'>encodePng</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-26632"></a>                                                  <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26633"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __image__: 3-D with shape `[height, width, channels]`.</span>
<a name="line-26634"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __contents__: 0-D. PNG-encoded image.</span>
<a name="line-26635"></a><span class='hs-definition'>encodePng</span> <span class='hs-varid'>image</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26636"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"EncodePng"</span>
<a name="line-26637"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26638"></a>        <span class='hs-varid'>image</span>
<a name="line-26639"></a><span class='hs-comment'>{-
<a name="line-26640"></a>attr {
<a name="line-26641"></a>  default_value { i: -1 }
<a name="line-26642"></a>  description: "Compression level."
<a name="line-26643"></a>  name: "compression"
<a name="line-26644"></a>  type: "int"
<a name="line-26645"></a>}
<a name="line-26646"></a>attr {
<a name="line-26647"></a>  allowed_values { list { type: DT_UINT8 type: DT_UINT16 } }
<a name="line-26648"></a>  default_value { type: DT_UINT8 }
<a name="line-26649"></a>  name: "T"
<a name="line-26650"></a>  type: "type"
<a name="line-26651"></a>}
<a name="line-26652"></a>input_arg {
<a name="line-26653"></a>  description: "3-D with shape `[height, width, channels]`."
<a name="line-26654"></a>  name: "image"
<a name="line-26655"></a>  type_attr: "T"
<a name="line-26656"></a>}
<a name="line-26657"></a>output_arg {
<a name="line-26658"></a>  description: "0-D. PNG-encoded image."
<a name="line-26659"></a>  name: "contents"
<a name="line-26660"></a>  type: DT_STRING
<a name="line-26661"></a>}
<a name="line-26662"></a>-}</span>
<a name="line-26663"></a>
<a name="line-26664"></a><a name="exit"></a><span class='hs-comment'>-- | Exits the current frame to its parent frame.</span>
<a name="line-26665"></a><span class='hs-comment'>--</span>
<a name="line-26666"></a><span class='hs-comment'>-- Exit makes its input `data` available to the parent frame.</span>
<a name="line-26667"></a><span class='hs-definition'>exit</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26668"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__: The tensor to be made available to the parent frame.</span>
<a name="line-26669"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The same tensor as `data`.</span>
<a name="line-26670"></a><span class='hs-definition'>exit</span> <span class='hs-varid'>data'</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26671"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Exit"</span>
<a name="line-26672"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26673"></a>        <span class='hs-varid'>data'</span>
<a name="line-26674"></a><span class='hs-comment'>{-
<a name="line-26675"></a>attr { name: "T" type: "type" }
<a name="line-26676"></a>input_arg {
<a name="line-26677"></a>  description: "The tensor to be made available to the parent frame."
<a name="line-26678"></a>  name: "data"
<a name="line-26679"></a>  type_attr: "T"
<a name="line-26680"></a>}
<a name="line-26681"></a>output_arg {
<a name="line-26682"></a>  description: "The same tensor as `data`."
<a name="line-26683"></a>  name: "output"
<a name="line-26684"></a>  type_attr: "T"
<a name="line-26685"></a>}
<a name="line-26686"></a>-}</span>
<a name="line-26687"></a>
<a name="line-26688"></a><a name="scatterNd"></a><span class='hs-comment'>-- | Creates a new tensor by applying sparse `updates` to individual</span>
<a name="line-26689"></a><span class='hs-comment'>--</span>
<a name="line-26690"></a><span class='hs-comment'>-- values or slices within a zero tensor of the given `shape` tensor according to</span>
<a name="line-26691"></a><span class='hs-comment'>-- indices.  This operator is the inverse of the [tf.gather_nd](#gather_nd)</span>
<a name="line-26692"></a><span class='hs-comment'>-- operator which extracts values or slices from a given tensor.</span>
<a name="line-26693"></a><span class='hs-comment'>-- </span>
<a name="line-26694"></a><span class='hs-comment'>-- TODO(simister): Add a link to Variable.__getitem__ documentation on slice</span>
<a name="line-26695"></a><span class='hs-comment'>-- syntax.</span>
<a name="line-26696"></a><span class='hs-comment'>-- </span>
<a name="line-26697"></a><span class='hs-comment'>-- `shape` is a `TensorShape` with rank `P` and `indices` is a `Tensor` of rank</span>
<a name="line-26698"></a><span class='hs-comment'>-- `Q`.</span>
<a name="line-26699"></a><span class='hs-comment'>-- </span>
<a name="line-26700"></a><span class='hs-comment'>-- `indices` must be integer tensor, containing indices into `shape`.</span>
<a name="line-26701"></a><span class='hs-comment'>-- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 &lt; K &lt;= P`.</span>
<a name="line-26702"></a><span class='hs-comment'>-- </span>
<a name="line-26703"></a><span class='hs-comment'>-- The innermost dimension of `indices` (with length `K`) corresponds to</span>
<a name="line-26704"></a><span class='hs-comment'>-- indices into elements (if `K = P`) or slices (if `K &lt; P`) along the `K`th</span>
<a name="line-26705"></a><span class='hs-comment'>-- dimension of `shape`.</span>
<a name="line-26706"></a><span class='hs-comment'>-- </span>
<a name="line-26707"></a><span class='hs-comment'>-- `updates` is Tensor of rank `Q-1+P-K` with shape:</span>
<a name="line-26708"></a><span class='hs-comment'>-- </span>
<a name="line-26709"></a><span class='hs-comment'>-- ```</span>
<a name="line-26710"></a><span class='hs-comment'>-- [d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]].</span>
<a name="line-26711"></a><span class='hs-comment'>-- ```</span>
<a name="line-26712"></a><span class='hs-comment'>-- </span>
<a name="line-26713"></a><span class='hs-comment'>-- The simplest form of scatter is to insert individual elements in a tensor by</span>
<a name="line-26714"></a><span class='hs-comment'>-- index. For example, say we want to insert 4 scattered elements in a rank-1</span>
<a name="line-26715"></a><span class='hs-comment'>-- tensor with 8 elements.</span>
<a name="line-26716"></a><span class='hs-comment'>-- </span>
<a name="line-26717"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-26718"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/ScatterNd1.png" alt&gt;</span>
<a name="line-26719"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-26720"></a><span class='hs-comment'>-- </span>
<a name="line-26721"></a><span class='hs-comment'>-- In Python, this scatter operation would look like this:</span>
<a name="line-26722"></a><span class='hs-comment'>-- </span>
<a name="line-26723"></a><span class='hs-comment'>--     indices = tf.constant([[4], [3], [1], [7]])</span>
<a name="line-26724"></a><span class='hs-comment'>--     updates = tf.constant([9, 10, 11, 12])</span>
<a name="line-26725"></a><span class='hs-comment'>--     shape = tf.constant([8])</span>
<a name="line-26726"></a><span class='hs-comment'>--     scatter = tf.scatter_nd(indices, updates, shape)</span>
<a name="line-26727"></a><span class='hs-comment'>--     with tf.Session() as sess:</span>
<a name="line-26728"></a><span class='hs-comment'>--       print sess.run(scatter)</span>
<a name="line-26729"></a><span class='hs-comment'>-- </span>
<a name="line-26730"></a><span class='hs-comment'>-- The resulting tensor would look like this:</span>
<a name="line-26731"></a><span class='hs-comment'>-- </span>
<a name="line-26732"></a><span class='hs-comment'>--     [0, 11, 0, 10, 9, 0, 0, 12]</span>
<a name="line-26733"></a><span class='hs-comment'>-- </span>
<a name="line-26734"></a><span class='hs-comment'>-- We can also, insert entire slices of a higher rank tensor all at once. For</span>
<a name="line-26735"></a><span class='hs-comment'>-- example, if we wanted to insert two slices in the first dimension of a</span>
<a name="line-26736"></a><span class='hs-comment'>-- rank-3 tensor with two matrices of new values.</span>
<a name="line-26737"></a><span class='hs-comment'>-- </span>
<a name="line-26738"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-26739"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/ScatterNd2.png" alt&gt;</span>
<a name="line-26740"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-26741"></a><span class='hs-comment'>-- </span>
<a name="line-26742"></a><span class='hs-comment'>-- In Python, this scatter operation would look like this:</span>
<a name="line-26743"></a><span class='hs-comment'>-- </span>
<a name="line-26744"></a><span class='hs-comment'>--     indices = tf.constant([[0], [2]])</span>
<a name="line-26745"></a><span class='hs-comment'>--     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],</span>
<a name="line-26746"></a><span class='hs-comment'>--                             [7, 7, 7, 7], [8, 8, 8, 8]],</span>
<a name="line-26747"></a><span class='hs-comment'>--                            [[5, 5, 5, 5], [6, 6, 6, 6],</span>
<a name="line-26748"></a><span class='hs-comment'>--                             [7, 7, 7, 7], [8, 8, 8, 8]]])</span>
<a name="line-26749"></a><span class='hs-comment'>--     shape = tf.constant([4, 4, 4])</span>
<a name="line-26750"></a><span class='hs-comment'>--     scatter = tf.scatter_nd(indices, updates, shape)</span>
<a name="line-26751"></a><span class='hs-comment'>--     with tf.Session() as sess:</span>
<a name="line-26752"></a><span class='hs-comment'>--       print sess.run(scatter)</span>
<a name="line-26753"></a><span class='hs-comment'>-- </span>
<a name="line-26754"></a><span class='hs-comment'>-- The resulting tensor would look like this:</span>
<a name="line-26755"></a><span class='hs-comment'>-- </span>
<a name="line-26756"></a><span class='hs-comment'>--     [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],</span>
<a name="line-26757"></a><span class='hs-comment'>--      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],</span>
<a name="line-26758"></a><span class='hs-comment'>--      [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],</span>
<a name="line-26759"></a><span class='hs-comment'>--      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]</span>
<a name="line-26760"></a><span class='hs-definition'>scatterNd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-26761"></a>                                           <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-26762"></a>                                                   <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26763"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.</span>
<a name="line-26764"></a>                                <span class='hs-comment'>-- A tensor of indices into ref.</span>
<a name="line-26765"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __updates__: A Tensor. Must have the same type as tensor. A tensor of updated values</span>
<a name="line-26766"></a>                            <span class='hs-comment'>-- to store in ref.</span>
<a name="line-26767"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __shape__: A vector. The shape of the resulting tensor.</span>
<a name="line-26768"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: A new tensor with the given shape and updates applied according</span>
<a name="line-26769"></a>             <span class='hs-comment'>-- to the indices.</span>
<a name="line-26770"></a><span class='hs-definition'>scatterNd</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-varid'>shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26771"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ScatterNd"</span>
<a name="line-26772"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-26773"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26774"></a>        <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-varid'>shape</span>
<a name="line-26775"></a><span class='hs-comment'>{-
<a name="line-26776"></a>attr { name: "T" type: "type" }
<a name="line-26777"></a>attr {
<a name="line-26778"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-26779"></a>  name: "Tindices"
<a name="line-26780"></a>  type: "type"
<a name="line-26781"></a>}
<a name="line-26782"></a>input_arg {
<a name="line-26783"></a>  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
<a name="line-26784"></a>  name: "indices"
<a name="line-26785"></a>  type_attr: "Tindices"
<a name="line-26786"></a>}
<a name="line-26787"></a>input_arg {
<a name="line-26788"></a>  description: "A Tensor. Must have the same type as tensor. A tensor of updated values\nto store in ref."
<a name="line-26789"></a>  name: "updates"
<a name="line-26790"></a>  type_attr: "T"
<a name="line-26791"></a>}
<a name="line-26792"></a>input_arg {
<a name="line-26793"></a>  description: "A vector. The shape of the resulting tensor."
<a name="line-26794"></a>  name: "shape"
<a name="line-26795"></a>  type_attr: "Tindices"
<a name="line-26796"></a>}
<a name="line-26797"></a>output_arg {
<a name="line-26798"></a>  description: "A new tensor with the given shape and updates applied according\nto the indices."
<a name="line-26799"></a>  name: "output"
<a name="line-26800"></a>  type_attr: "T"
<a name="line-26801"></a>}
<a name="line-26802"></a>-}</span>
<a name="line-26803"></a>
<a name="line-26804"></a><a name="priorityQueue"></a><span class='hs-comment'>-- | A queue that produces elements sorted by the first component value.</span>
<a name="line-26805"></a><span class='hs-comment'>--</span>
<a name="line-26806"></a><span class='hs-comment'>-- Note that the PriorityQueue requires the first component of any element</span>
<a name="line-26807"></a><span class='hs-comment'>-- to be a scalar int64, in addition to the other elements declared by</span>
<a name="line-26808"></a><span class='hs-comment'>-- component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue</span>
<a name="line-26809"></a><span class='hs-comment'>-- and DequeueMany) on a PriorityQueue will all require (resp. output) one extra</span>
<a name="line-26810"></a><span class='hs-comment'>-- entry in their input (resp. output) lists.</span>
<a name="line-26811"></a><span class='hs-definition'>priorityQueue</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __handle__: The handle to the queue.</span>
<a name="line-26812"></a><span class='hs-definition'>priorityQueue</span>  <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26813"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"PriorityQueue"</span><span class='hs-layout'>)</span>
<a name="line-26814"></a>        
<a name="line-26815"></a><span class='hs-comment'>{-
<a name="line-26816"></a>attr {
<a name="line-26817"></a>  default_value { list { } }
<a name="line-26818"></a>  description: "The type of each component in a value."
<a name="line-26819"></a>  has_minimum: true
<a name="line-26820"></a>  name: "component_types"
<a name="line-26821"></a>  type: "list(type)"
<a name="line-26822"></a>}
<a name="line-26823"></a>attr {
<a name="line-26824"></a>  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
<a name="line-26825"></a>  has_minimum: true
<a name="line-26826"></a>  name: "shapes"
<a name="line-26827"></a>  type: "list(shape)"
<a name="line-26828"></a>}
<a name="line-26829"></a>attr {
<a name="line-26830"></a>  default_value { i: -1 }
<a name="line-26831"></a>  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
<a name="line-26832"></a>  name: "capacity"
<a name="line-26833"></a>  type: "int"
<a name="line-26834"></a>}
<a name="line-26835"></a>attr {
<a name="line-26836"></a>  default_value { s: "" }
<a name="line-26837"></a>  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
<a name="line-26838"></a>  name: "container"
<a name="line-26839"></a>  type: "string"
<a name="line-26840"></a>}
<a name="line-26841"></a>attr {
<a name="line-26842"></a>  default_value { s: "" }
<a name="line-26843"></a>  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
<a name="line-26844"></a>  name: "shared_name"
<a name="line-26845"></a>  type: "string"
<a name="line-26846"></a>}
<a name="line-26847"></a>output_arg {
<a name="line-26848"></a>  description: "The handle to the queue."
<a name="line-26849"></a>  is_ref: true
<a name="line-26850"></a>  name: "handle"
<a name="line-26851"></a>  type: DT_STRING
<a name="line-26852"></a>}
<a name="line-26853"></a>-}</span>
<a name="line-26854"></a>
<a name="line-26855"></a><a name="refSwitch"></a><span class='hs-comment'>-- | Forwards the ref tensor `data` to the output port determined by `pred`.</span>
<a name="line-26856"></a><span class='hs-comment'>--</span>
<a name="line-26857"></a><span class='hs-comment'>-- If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,</span>
<a name="line-26858"></a><span class='hs-comment'>-- the data goes to `output_false`.</span>
<a name="line-26859"></a><span class='hs-comment'>-- </span>
<a name="line-26860"></a><span class='hs-comment'>-- See also `Switch` and `Merge`.</span>
<a name="line-26861"></a><span class='hs-definition'>refSwitch</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26862"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__: The ref tensor to be forwarded to the appropriate output.</span>
<a name="line-26863"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Bool</span> <span class='hs-comment'>-- ^ __pred__: A scalar that specifies which output port will receive data.</span>
<a name="line-26864"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26865"></a>             <span class='hs-comment'>-- ^ (__output_false__, __output_true__)</span>
<a name="line-26866"></a>             <span class='hs-comment'>--</span>
<a name="line-26867"></a>             <span class='hs-comment'>-- * __output_false__: If `pred` is false, data will be forwarded to this output.</span>
<a name="line-26868"></a>             <span class='hs-comment'>--</span>
<a name="line-26869"></a>             <span class='hs-comment'>-- * __output_true__: If `pred` is true, data will be forwarded to this output.</span>
<a name="line-26870"></a><span class='hs-definition'>refSwitch</span> <span class='hs-varid'>data'</span> <span class='hs-varid'>pred</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26871"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RefSwitch"</span>
<a name="line-26872"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26873"></a>        <span class='hs-varid'>data'</span> <span class='hs-varid'>pred</span>
<a name="line-26874"></a><span class='hs-comment'>{-
<a name="line-26875"></a>attr { name: "T" type: "type" }
<a name="line-26876"></a>input_arg {
<a name="line-26877"></a>  description: "The ref tensor to be forwarded to the appropriate output."
<a name="line-26878"></a>  is_ref: true
<a name="line-26879"></a>  name: "data"
<a name="line-26880"></a>  type_attr: "T"
<a name="line-26881"></a>}
<a name="line-26882"></a>input_arg {
<a name="line-26883"></a>  description: "A scalar that specifies which output port will receive data."
<a name="line-26884"></a>  name: "pred"
<a name="line-26885"></a>  type: DT_BOOL
<a name="line-26886"></a>}
<a name="line-26887"></a>output_arg {
<a name="line-26888"></a>  description: "If `pred` is false, data will be forwarded to this output."
<a name="line-26889"></a>  is_ref: true
<a name="line-26890"></a>  name: "output_false"
<a name="line-26891"></a>  type_attr: "T"
<a name="line-26892"></a>}
<a name="line-26893"></a>output_arg {
<a name="line-26894"></a>  description: "If `pred` is true, data will be forwarded to this output."
<a name="line-26895"></a>  is_ref: true
<a name="line-26896"></a>  name: "output_true"
<a name="line-26897"></a>  type_attr: "T"
<a name="line-26898"></a>}
<a name="line-26899"></a>-}</span>
<a name="line-26900"></a>
<a name="line-26901"></a><span class='hs-comment'>-- | Makes its input available to the next iteration.</span>
<a name="line-26902"></a>
<a name="line-26903"></a><a name="nextIteration"></a><span class='hs-definition'>nextIteration</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26904"></a>                 <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__: The tensor to be made available to the next iteration.</span>
<a name="line-26905"></a>                 <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The same tensor as `data`.</span>
<a name="line-26906"></a><span class='hs-definition'>nextIteration</span> <span class='hs-varid'>data'</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26907"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"NextIteration"</span>
<a name="line-26908"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26909"></a>        <span class='hs-varid'>data'</span>
<a name="line-26910"></a><span class='hs-comment'>{-
<a name="line-26911"></a>attr { name: "T" type: "type" }
<a name="line-26912"></a>input_arg {
<a name="line-26913"></a>  description: "The tensor to be made available to the next iteration."
<a name="line-26914"></a>  name: "data"
<a name="line-26915"></a>  type_attr: "T"
<a name="line-26916"></a>}
<a name="line-26917"></a>output_arg {
<a name="line-26918"></a>  description: "The same tensor as `data`."
<a name="line-26919"></a>  name: "output"
<a name="line-26920"></a>  type_attr: "T"
<a name="line-26921"></a>}
<a name="line-26922"></a>-}</span>
<a name="line-26923"></a>
<a name="line-26924"></a><span class='hs-comment'>-- | Makes its input available to the next iteration.</span>
<a name="line-26925"></a>
<a name="line-26926"></a><a name="refNextIteration"></a><span class='hs-definition'>refNextIteration</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26927"></a>                    <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__: The tensor to be made available to the next iteration.</span>
<a name="line-26928"></a>                    <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: The same tensor as `data`.</span>
<a name="line-26929"></a><span class='hs-definition'>refNextIteration</span> <span class='hs-varid'>data'</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26930"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RefNextIteration"</span>
<a name="line-26931"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26932"></a>        <span class='hs-varid'>data'</span>
<a name="line-26933"></a><span class='hs-comment'>{-
<a name="line-26934"></a>attr { name: "T" type: "type" }
<a name="line-26935"></a>input_arg {
<a name="line-26936"></a>  description: "The tensor to be made available to the next iteration."
<a name="line-26937"></a>  is_ref: true
<a name="line-26938"></a>  name: "data"
<a name="line-26939"></a>  type_attr: "T"
<a name="line-26940"></a>}
<a name="line-26941"></a>output_arg {
<a name="line-26942"></a>  description: "The same tensor as `data`."
<a name="line-26943"></a>  is_ref: true
<a name="line-26944"></a>  name: "output"
<a name="line-26945"></a>  type_attr: "T"
<a name="line-26946"></a>}
<a name="line-26947"></a>-}</span>
<a name="line-26948"></a>
<a name="line-26949"></a><a name="batchMatMul"></a><span class='hs-comment'>-- | Multiplies slices of two tensors in batches.</span>
<a name="line-26950"></a><span class='hs-comment'>--</span>
<a name="line-26951"></a><span class='hs-comment'>-- Multiplies all slices of `Tensor` `x` and `y` (each slice can be</span>
<a name="line-26952"></a><span class='hs-comment'>-- viewed as an element of a batch), and arranges the individual results</span>
<a name="line-26953"></a><span class='hs-comment'>-- in a single output tensor of the same batch size. Each of the</span>
<a name="line-26954"></a><span class='hs-comment'>-- individual slices can optionally be adjointed (to adjoint a matrix</span>
<a name="line-26955"></a><span class='hs-comment'>-- means to transpose and conjugate it) before multiplication by setting</span>
<a name="line-26956"></a><span class='hs-comment'>-- the `adj_x` or `adj_y` flag to `True`, which are by default `False`.</span>
<a name="line-26957"></a><span class='hs-comment'>-- </span>
<a name="line-26958"></a><span class='hs-comment'>-- The input tensors `x` and `y` are 3-D or higher with shape `[..., r_x, c_x]`</span>
<a name="line-26959"></a><span class='hs-comment'>-- and `[..., r_y, c_y]`.</span>
<a name="line-26960"></a><span class='hs-comment'>-- </span>
<a name="line-26961"></a><span class='hs-comment'>-- The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, where:</span>
<a name="line-26962"></a><span class='hs-comment'>-- </span>
<a name="line-26963"></a><span class='hs-comment'>--     r_o = c_x if adj_x else r_x</span>
<a name="line-26964"></a><span class='hs-comment'>--     c_o = r_y if adj_y else c_y</span>
<a name="line-26965"></a><span class='hs-comment'>-- </span>
<a name="line-26966"></a><span class='hs-comment'>-- It is computed as:</span>
<a name="line-26967"></a><span class='hs-comment'>-- </span>
<a name="line-26968"></a><span class='hs-comment'>--     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])</span>
<a name="line-26969"></a><span class='hs-definition'>batchMatMul</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-26970"></a>                                 <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-26971"></a>                                         <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-26972"></a>                                         <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-26973"></a>                                         <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-26974"></a>               <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__: 3-D or higher with shape `[..., r_x, c_x]`.</span>
<a name="line-26975"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__: 3-D or higher with shape `[..., r_y, c_y]`.</span>
<a name="line-26976"></a>               <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 3-D or higher with shape `[..., r_o, c_o]`</span>
<a name="line-26977"></a><span class='hs-definition'>batchMatMul</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-26978"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"BatchMatMul"</span>
<a name="line-26979"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-26980"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-26981"></a><span class='hs-comment'>{-
<a name="line-26982"></a>attr {
<a name="line-26983"></a>  allowed_values {
<a name="line-26984"></a>    list {
<a name="line-26985"></a>      type: DT_HALF
<a name="line-26986"></a>      type: DT_FLOAT
<a name="line-26987"></a>      type: DT_DOUBLE
<a name="line-26988"></a>      type: DT_INT32
<a name="line-26989"></a>      type: DT_COMPLEX64
<a name="line-26990"></a>      type: DT_COMPLEX128
<a name="line-26991"></a>    }
<a name="line-26992"></a>  }
<a name="line-26993"></a>  name: "T"
<a name="line-26994"></a>  type: "type"
<a name="line-26995"></a>}
<a name="line-26996"></a>attr {
<a name="line-26997"></a>  default_value { b: false }
<a name="line-26998"></a>  description: "If `True`, adjoint the slices of `x`. Defaults to `False`."
<a name="line-26999"></a>  name: "adj_x"
<a name="line-27000"></a>  type: "bool"
<a name="line-27001"></a>}
<a name="line-27002"></a>attr {
<a name="line-27003"></a>  default_value { b: false }
<a name="line-27004"></a>  description: "If `True`, adjoint the slices of `y`. Defaults to `False`."
<a name="line-27005"></a>  name: "adj_y"
<a name="line-27006"></a>  type: "bool"
<a name="line-27007"></a>}
<a name="line-27008"></a>input_arg {
<a name="line-27009"></a>  description: "3-D or higher with shape `[..., r_x, c_x]`."
<a name="line-27010"></a>  name: "x"
<a name="line-27011"></a>  type_attr: "T"
<a name="line-27012"></a>}
<a name="line-27013"></a>input_arg {
<a name="line-27014"></a>  description: "3-D or higher with shape `[..., r_y, c_y]`."
<a name="line-27015"></a>  name: "y"
<a name="line-27016"></a>  type_attr: "T"
<a name="line-27017"></a>}
<a name="line-27018"></a>output_arg {
<a name="line-27019"></a>  description: "3-D or higher with shape `[..., r_o, c_o]`"
<a name="line-27020"></a>  name: "output"
<a name="line-27021"></a>  type_attr: "T"
<a name="line-27022"></a>}
<a name="line-27023"></a>-}</span>
<a name="line-27024"></a>
<a name="line-27025"></a><span class='hs-comment'>-- | Forwards the `index`th element of `inputs` to `output`.</span>
<a name="line-27026"></a>
<a name="line-27027"></a><a name="refSelect"></a><span class='hs-definition'>refSelect</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27028"></a>             <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span> <span class='hs-comment'>-- ^ __index__: A scalar that determines the input that gets selected.</span>
<a name="line-27029"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-keyglyph'>[</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-keyglyph'>]</span> <span class='hs-comment'>-- ^ __inputs__: A list of ref tensors, one of which will be forwarded to `output`.</span>
<a name="line-27030"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: The forwarded tensor.</span>
<a name="line-27031"></a><span class='hs-definition'>refSelect</span> <span class='hs-varid'>index</span> <span class='hs-varid'>inputs</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"N"</span><span class='hs-layout'>,</span> <span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-str'>"inputs"</span><span class='hs-layout'>,</span> <span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span><span class='hs-layout'>)</span><span class='hs-keyglyph'>]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27032"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RefSelect"</span>
<a name="line-27033"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-27034"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"N"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>n</span><span class='hs-layout'>)</span>
<a name="line-27035"></a>        <span class='hs-varid'>index</span> <span class='hs-varid'>inputs</span>
<a name="line-27036"></a>  <span class='hs-keyword'>where</span>
<a name="line-27037"></a>    <span class='hs-varid'>n</span> <span class='hs-keyglyph'>=</span> <span class='hs-varid'>fromIntegral</span> <span class='hs-layout'>(</span><span class='hs-varid'>length</span> <span class='hs-varid'>inputs</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Int64</span>
<a name="line-27038"></a><span class='hs-comment'>{-
<a name="line-27039"></a>attr { name: "T" type: "type" }
<a name="line-27040"></a>attr { has_minimum: true minimum: 1 name: "N" type: "int" }
<a name="line-27041"></a>input_arg {
<a name="line-27042"></a>  description: "A scalar that determines the input that gets selected."
<a name="line-27043"></a>  name: "index"
<a name="line-27044"></a>  type: DT_INT32
<a name="line-27045"></a>}
<a name="line-27046"></a>input_arg {
<a name="line-27047"></a>  description: "A list of ref tensors, one of which will be forwarded to `output`."
<a name="line-27048"></a>  is_ref: true
<a name="line-27049"></a>  name: "inputs"
<a name="line-27050"></a>  number_attr: "N"
<a name="line-27051"></a>  type_attr: "T"
<a name="line-27052"></a>}
<a name="line-27053"></a>output_arg {
<a name="line-27054"></a>  description: "The forwarded tensor."
<a name="line-27055"></a>  is_ref: true
<a name="line-27056"></a>  name: "output"
<a name="line-27057"></a>  type_attr: "T"
<a name="line-27058"></a>}
<a name="line-27059"></a>-}</span>
<a name="line-27060"></a>
<a name="line-27061"></a><a name="mean"></a><span class='hs-comment'>-- | Computes the mean of elements across dimensions of a tensor.</span>
<a name="line-27062"></a><span class='hs-comment'>--</span>
<a name="line-27063"></a><span class='hs-comment'>-- Reduces `input` along the dimensions given in `reduction_indices`. Unless</span>
<a name="line-27064"></a><span class='hs-comment'>-- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in</span>
<a name="line-27065"></a><span class='hs-comment'>-- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are</span>
<a name="line-27066"></a><span class='hs-comment'>-- retained with length 1.</span>
<a name="line-27067"></a><span class='hs-definition'>mean</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tidx</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-27068"></a>                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-27069"></a>                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-27070"></a>                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-27071"></a>                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-27072"></a>                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-27073"></a>                                       <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>,</span>
<a name="line-27074"></a>                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27075"></a>        <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __input__: The tensor to reduce.</span>
<a name="line-27076"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tidx</span> <span class='hs-comment'>-- ^ __reduction_indices__: The dimensions to reduce.</span>
<a name="line-27077"></a>        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: The reduced tensor.</span>
<a name="line-27078"></a><span class='hs-definition'>mean</span> <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27079"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Mean"</span>
<a name="line-27080"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-27081"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tidx"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tidx</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-27082"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>reduction_indices</span>
<a name="line-27083"></a><span class='hs-comment'>{-
<a name="line-27084"></a>attr {
<a name="line-27085"></a>  default_value { b: false }
<a name="line-27086"></a>  description: "If true, retain reduced dimensions with length 1."
<a name="line-27087"></a>  name: "keep_dims"
<a name="line-27088"></a>  type: "bool"
<a name="line-27089"></a>}
<a name="line-27090"></a>attr {
<a name="line-27091"></a>  allowed_values {
<a name="line-27092"></a>    list {
<a name="line-27093"></a>      type: DT_FLOAT
<a name="line-27094"></a>      type: DT_DOUBLE
<a name="line-27095"></a>      type: DT_INT64
<a name="line-27096"></a>      type: DT_INT32
<a name="line-27097"></a>      type: DT_UINT8
<a name="line-27098"></a>      type: DT_UINT16
<a name="line-27099"></a>      type: DT_INT16
<a name="line-27100"></a>      type: DT_INT8
<a name="line-27101"></a>      type: DT_COMPLEX64
<a name="line-27102"></a>      type: DT_COMPLEX128
<a name="line-27103"></a>      type: DT_QINT8
<a name="line-27104"></a>      type: DT_QUINT8
<a name="line-27105"></a>      type: DT_QINT32
<a name="line-27106"></a>      type: DT_HALF
<a name="line-27107"></a>    }
<a name="line-27108"></a>  }
<a name="line-27109"></a>  name: "T"
<a name="line-27110"></a>  type: "type"
<a name="line-27111"></a>}
<a name="line-27112"></a>attr {
<a name="line-27113"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-27114"></a>  default_value { type: DT_INT32 }
<a name="line-27115"></a>  name: "Tidx"
<a name="line-27116"></a>  type: "type"
<a name="line-27117"></a>}
<a name="line-27118"></a>input_arg {
<a name="line-27119"></a>  description: "The tensor to reduce." name: "input" type_attr: "T"
<a name="line-27120"></a>}
<a name="line-27121"></a>input_arg {
<a name="line-27122"></a>  description: "The dimensions to reduce."
<a name="line-27123"></a>  name: "reduction_indices"
<a name="line-27124"></a>  type_attr: "Tidx"
<a name="line-27125"></a>}
<a name="line-27126"></a>output_arg {
<a name="line-27127"></a>  description: "The reduced tensor." name: "output" type_attr: "T"
<a name="line-27128"></a>}
<a name="line-27129"></a>-}</span>
<a name="line-27130"></a>
<a name="line-27131"></a><a name="scatterAdd"></a><span class='hs-comment'>-- | Adds sparse updates to a variable reference.</span>
<a name="line-27132"></a><span class='hs-comment'>--</span>
<a name="line-27133"></a><span class='hs-comment'>-- This operation computes</span>
<a name="line-27134"></a><span class='hs-comment'>-- </span>
<a name="line-27135"></a><span class='hs-comment'>--     # Scalar indices</span>
<a name="line-27136"></a><span class='hs-comment'>--     ref[indices, ...] += updates[...]</span>
<a name="line-27137"></a><span class='hs-comment'>-- </span>
<a name="line-27138"></a><span class='hs-comment'>--     # Vector indices (for each i)</span>
<a name="line-27139"></a><span class='hs-comment'>--     ref[indices[i], ...] += updates[i, ...]</span>
<a name="line-27140"></a><span class='hs-comment'>-- </span>
<a name="line-27141"></a><span class='hs-comment'>--     # High rank indices (for each i, ..., j)</span>
<a name="line-27142"></a><span class='hs-comment'>--     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]</span>
<a name="line-27143"></a><span class='hs-comment'>-- </span>
<a name="line-27144"></a><span class='hs-comment'>-- This operation outputs `ref` after the update is done.</span>
<a name="line-27145"></a><span class='hs-comment'>-- This makes it easier to chain operations that need to use the reset value.</span>
<a name="line-27146"></a><span class='hs-comment'>-- </span>
<a name="line-27147"></a><span class='hs-comment'>-- Duplicate entries are handled correctly: if multiple `indices` reference</span>
<a name="line-27148"></a><span class='hs-comment'>-- the same location, their contributions add.</span>
<a name="line-27149"></a><span class='hs-comment'>-- </span>
<a name="line-27150"></a><span class='hs-comment'>-- Requires `updates.shape = indices.shape + ref.shape[1:]`.</span>
<a name="line-27151"></a><span class='hs-comment'>-- </span>
<a name="line-27152"></a><span class='hs-comment'>-- &lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&gt;</span>
<a name="line-27153"></a><span class='hs-comment'>-- &lt;img style="width:100%" src="../../images/ScatterAdd.png" alt&gt;</span>
<a name="line-27154"></a><span class='hs-comment'>-- &lt;/div&gt;</span>
<a name="line-27155"></a><span class='hs-definition'>scatterAdd</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varid'>tindices</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-27156"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-27157"></a>                                                 <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-27158"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-27159"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-27160"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-27161"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-27162"></a>                                                 <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>,</span>
<a name="line-27163"></a>                                         <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-27164"></a>                                                 <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27165"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __ref__: Should be from a `Variable` node.</span>
<a name="line-27166"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tindices</span> <span class='hs-comment'>-- ^ __indices__: A tensor of indices into the first dimension of `ref`.</span>
<a name="line-27167"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __updates__: A tensor of updated values to add to `ref`.</span>
<a name="line-27168"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want</span>
<a name="line-27169"></a>              <span class='hs-comment'>-- to use the updated values after the update is done.</span>
<a name="line-27170"></a><span class='hs-definition'>scatterAdd</span> <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27171"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ScatterAdd"</span>
<a name="line-27172"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-27173"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tindices"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tindices</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-27174"></a>        <span class='hs-varid'>ref</span> <span class='hs-varid'>indices</span> <span class='hs-varid'>updates</span>
<a name="line-27175"></a><span class='hs-comment'>{-
<a name="line-27176"></a>attr {
<a name="line-27177"></a>  allowed_values {
<a name="line-27178"></a>    list {
<a name="line-27179"></a>      type: DT_FLOAT
<a name="line-27180"></a>      type: DT_DOUBLE
<a name="line-27181"></a>      type: DT_INT64
<a name="line-27182"></a>      type: DT_INT32
<a name="line-27183"></a>      type: DT_UINT8
<a name="line-27184"></a>      type: DT_UINT16
<a name="line-27185"></a>      type: DT_INT16
<a name="line-27186"></a>      type: DT_INT8
<a name="line-27187"></a>      type: DT_COMPLEX64
<a name="line-27188"></a>      type: DT_COMPLEX128
<a name="line-27189"></a>      type: DT_QINT8
<a name="line-27190"></a>      type: DT_QUINT8
<a name="line-27191"></a>      type: DT_QINT32
<a name="line-27192"></a>      type: DT_HALF
<a name="line-27193"></a>    }
<a name="line-27194"></a>  }
<a name="line-27195"></a>  name: "T"
<a name="line-27196"></a>  type: "type"
<a name="line-27197"></a>}
<a name="line-27198"></a>attr {
<a name="line-27199"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-27200"></a>  name: "Tindices"
<a name="line-27201"></a>  type: "type"
<a name="line-27202"></a>}
<a name="line-27203"></a>attr {
<a name="line-27204"></a>  default_value { b: false }
<a name="line-27205"></a>  description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
<a name="line-27206"></a>  name: "use_locking"
<a name="line-27207"></a>  type: "bool"
<a name="line-27208"></a>}
<a name="line-27209"></a>input_arg {
<a name="line-27210"></a>  description: "Should be from a `Variable` node."
<a name="line-27211"></a>  is_ref: true
<a name="line-27212"></a>  name: "ref"
<a name="line-27213"></a>  type_attr: "T"
<a name="line-27214"></a>}
<a name="line-27215"></a>input_arg {
<a name="line-27216"></a>  description: "A tensor of indices into the first dimension of `ref`."
<a name="line-27217"></a>  name: "indices"
<a name="line-27218"></a>  type_attr: "Tindices"
<a name="line-27219"></a>}
<a name="line-27220"></a>input_arg {
<a name="line-27221"></a>  description: "A tensor of updated values to add to `ref`."
<a name="line-27222"></a>  name: "updates"
<a name="line-27223"></a>  type_attr: "T"
<a name="line-27224"></a>}
<a name="line-27225"></a>output_arg {
<a name="line-27226"></a>  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
<a name="line-27227"></a>  is_ref: true
<a name="line-27228"></a>  name: "output_ref"
<a name="line-27229"></a>  type_attr: "T"
<a name="line-27230"></a>}
<a name="line-27231"></a>-}</span>
<a name="line-27232"></a>
<a name="line-27233"></a><a name="randomCrop"></a><span class='hs-comment'>-- | Randomly crop `image`.</span>
<a name="line-27234"></a><span class='hs-comment'>--</span>
<a name="line-27235"></a><span class='hs-comment'>-- `size` is a 1-D int64 tensor with 2 elements representing the crop height and</span>
<a name="line-27236"></a><span class='hs-comment'>-- width.  The values must be non negative.</span>
<a name="line-27237"></a><span class='hs-comment'>-- </span>
<a name="line-27238"></a><span class='hs-comment'>-- This Op picks a random location in `image` and crops a `height` by `width`</span>
<a name="line-27239"></a><span class='hs-comment'>-- rectangle from that location.  The random location is picked so the cropped</span>
<a name="line-27240"></a><span class='hs-comment'>-- area will fit inside the original image.</span>
<a name="line-27241"></a><span class='hs-definition'>randomCrop</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-27242"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-27243"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-27244"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-27245"></a>                                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-27246"></a>                                                      <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27247"></a>              <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __image__: 3-D of shape `[height, width, channels]`.</span>
<a name="line-27248"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __size__: 1-D of length 2 containing: `crop_height`, `crop_width`..</span>
<a name="line-27249"></a>              <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: 3-D of shape `[crop_height, crop_width, channels].`</span>
<a name="line-27250"></a><span class='hs-definition'>randomCrop</span> <span class='hs-varid'>image</span> <span class='hs-varid'>size</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27251"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RandomCrop"</span>
<a name="line-27252"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-27253"></a>        <span class='hs-varid'>image</span> <span class='hs-varid'>size</span>
<a name="line-27254"></a><span class='hs-comment'>{-
<a name="line-27255"></a>attr {
<a name="line-27256"></a>  allowed_values {
<a name="line-27257"></a>    list {
<a name="line-27258"></a>      type: DT_UINT8
<a name="line-27259"></a>      type: DT_INT8
<a name="line-27260"></a>      type: DT_INT16
<a name="line-27261"></a>      type: DT_INT32
<a name="line-27262"></a>      type: DT_INT64
<a name="line-27263"></a>      type: DT_FLOAT
<a name="line-27264"></a>      type: DT_DOUBLE
<a name="line-27265"></a>    }
<a name="line-27266"></a>  }
<a name="line-27267"></a>  name: "T"
<a name="line-27268"></a>  type: "type"
<a name="line-27269"></a>}
<a name="line-27270"></a>attr {
<a name="line-27271"></a>  default_value { i: 0 }
<a name="line-27272"></a>  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
<a name="line-27273"></a>  name: "seed"
<a name="line-27274"></a>  type: "int"
<a name="line-27275"></a>}
<a name="line-27276"></a>attr {
<a name="line-27277"></a>  default_value { i: 0 }
<a name="line-27278"></a>  description: "An second seed to avoid seed collision."
<a name="line-27279"></a>  name: "seed2"
<a name="line-27280"></a>  type: "int"
<a name="line-27281"></a>}
<a name="line-27282"></a>input_arg {
<a name="line-27283"></a>  description: "3-D of shape `[height, width, channels]`."
<a name="line-27284"></a>  name: "image"
<a name="line-27285"></a>  type_attr: "T"
<a name="line-27286"></a>}
<a name="line-27287"></a>input_arg {
<a name="line-27288"></a>  description: "1-D of length 2 containing: `crop_height`, `crop_width`.."
<a name="line-27289"></a>  name: "size"
<a name="line-27290"></a>  type: DT_INT64
<a name="line-27291"></a>}
<a name="line-27292"></a>output_arg {
<a name="line-27293"></a>  description: "3-D of shape `[crop_height, crop_width, channels].`"
<a name="line-27294"></a>  name: "output"
<a name="line-27295"></a>  type_attr: "T"
<a name="line-27296"></a>}
<a name="line-27297"></a>-}</span>
<a name="line-27298"></a>
<a name="line-27299"></a><a name="refExit"></a><span class='hs-comment'>-- | Exits the current frame to its parent frame.</span>
<a name="line-27300"></a><span class='hs-comment'>--</span>
<a name="line-27301"></a><span class='hs-comment'>-- Exit makes its input `data` available to the parent frame.</span>
<a name="line-27302"></a><span class='hs-definition'>refExit</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27303"></a>           <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __data__: The tensor to be made available to the parent frame.</span>
<a name="line-27304"></a>           <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __output__: The same tensor as `data`.</span>
<a name="line-27305"></a><span class='hs-definition'>refExit</span> <span class='hs-varid'>data'</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27306"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"RefExit"</span>
<a name="line-27307"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-27308"></a>        <span class='hs-varid'>data'</span>
<a name="line-27309"></a><span class='hs-comment'>{-
<a name="line-27310"></a>attr { name: "T" type: "type" }
<a name="line-27311"></a>input_arg {
<a name="line-27312"></a>  description: "The tensor to be made available to the parent frame."
<a name="line-27313"></a>  is_ref: true
<a name="line-27314"></a>  name: "data"
<a name="line-27315"></a>  type_attr: "T"
<a name="line-27316"></a>}
<a name="line-27317"></a>output_arg {
<a name="line-27318"></a>  description: "The same tensor as `data`."
<a name="line-27319"></a>  is_ref: true
<a name="line-27320"></a>  name: "output"
<a name="line-27321"></a>  type_attr: "T"
<a name="line-27322"></a>}
<a name="line-27323"></a>-}</span>
<a name="line-27324"></a>
<a name="line-27325"></a><a name="readerSerializeState"></a><span class='hs-comment'>-- | Produce a string tensor that encodes the state of a Reader.</span>
<a name="line-27326"></a><span class='hs-comment'>--</span>
<a name="line-27327"></a><span class='hs-comment'>-- Not all Readers support being serialized, so this can produce an</span>
<a name="line-27328"></a><span class='hs-comment'>-- Unimplemented error.</span>
<a name="line-27329"></a><span class='hs-definition'>readerSerializeState</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __reader_handle__: Handle to a Reader.</span>
<a name="line-27330"></a>                        <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-layout'>)</span> <span class='hs-comment'>-- ^ __state__</span>
<a name="line-27331"></a><span class='hs-definition'>readerSerializeState</span> <span class='hs-varid'>reader_handle</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27332"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"ReaderSerializeState"</span><span class='hs-layout'>)</span>
<a name="line-27333"></a>        <span class='hs-varid'>reader_handle</span>
<a name="line-27334"></a><span class='hs-comment'>{-
<a name="line-27335"></a>input_arg {
<a name="line-27336"></a>  description: "Handle to a Reader."
<a name="line-27337"></a>  is_ref: true
<a name="line-27338"></a>  name: "reader_handle"
<a name="line-27339"></a>  type: DT_STRING
<a name="line-27340"></a>}
<a name="line-27341"></a>output_arg { name: "state" type: DT_STRING }
<a name="line-27342"></a>-}</span>
<a name="line-27343"></a>
<a name="line-27344"></a><a name="tanhGrad"></a><span class='hs-comment'>-- | Computes the gradient for the tanh of `x` wrt its input.</span>
<a name="line-27345"></a><span class='hs-comment'>--</span>
<a name="line-27346"></a><span class='hs-comment'>-- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`</span>
<a name="line-27347"></a><span class='hs-comment'>-- is the corresponding input gradient.</span>
<a name="line-27348"></a><span class='hs-definition'>tanhGrad</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-27349"></a>                              <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-27350"></a>                                      <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-27351"></a>                                      <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span> <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27352"></a>            <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __x__</span>
<a name="line-27353"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __y__</span>
<a name="line-27354"></a>            <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __z__</span>
<a name="line-27355"></a><span class='hs-definition'>tanhGrad</span> <span class='hs-varid'>x</span> <span class='hs-varid'>y</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27356"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"TanhGrad"</span>
<a name="line-27357"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-27358"></a>        <span class='hs-varid'>x</span> <span class='hs-varid'>y</span>
<a name="line-27359"></a><span class='hs-comment'>{-
<a name="line-27360"></a>attr {
<a name="line-27361"></a>  allowed_values {
<a name="line-27362"></a>    list {
<a name="line-27363"></a>      type: DT_HALF
<a name="line-27364"></a>      type: DT_FLOAT
<a name="line-27365"></a>      type: DT_DOUBLE
<a name="line-27366"></a>      type: DT_COMPLEX64
<a name="line-27367"></a>      type: DT_COMPLEX128
<a name="line-27368"></a>    }
<a name="line-27369"></a>  }
<a name="line-27370"></a>  name: "T"
<a name="line-27371"></a>  type: "type"
<a name="line-27372"></a>}
<a name="line-27373"></a>input_arg { name: "x" type_attr: "T" }
<a name="line-27374"></a>input_arg { name: "y" type_attr: "T" }
<a name="line-27375"></a>output_arg { name: "z" type_attr: "T" }
<a name="line-27376"></a>-}</span>
<a name="line-27377"></a>
<a name="line-27378"></a><a name="sparseSparseMaximum"></a><span class='hs-comment'>-- | Returns the element-wise max of two SparseTensors.</span>
<a name="line-27379"></a><span class='hs-comment'>--</span>
<a name="line-27380"></a><span class='hs-comment'>-- Assumes the two SparseTensors have the same shape, i.e., no broadcasting.</span>
<a name="line-27381"></a><span class='hs-definition'>sparseSparseMaximum</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>v6</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-27382"></a>                                                     <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-27383"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-27384"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-27385"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-27386"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-27387"></a>                                                             <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span>
<a name="line-27388"></a>                                                             <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-27389"></a>                                                             <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27390"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __a_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a</span>
<a name="line-27391"></a>                                                <span class='hs-comment'>-- SparseTensor, in the canonical lexicographic ordering.</span>
<a name="line-27392"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __a_values__: 1-D.  `N` non-empty values corresponding to `a_indices`.</span>
<a name="line-27393"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __a_shape__: 1-D.  Shape of the input SparseTensor.</span>
<a name="line-27394"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __b_indices__: counterpart to `a_indices` for the other operand.</span>
<a name="line-27395"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v5</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype.</span>
<a name="line-27396"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v6</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal.</span>
<a name="line-27397"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-layout'>(</span><span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span>
<a name="line-27398"></a>                       <span class='hs-comment'>-- ^ (__output_indices__, __output_values__)</span>
<a name="line-27399"></a>                       <span class='hs-comment'>--</span>
<a name="line-27400"></a>                       <span class='hs-comment'>-- * __output_indices__: 2-D.  The indices of the output SparseTensor.</span>
<a name="line-27401"></a>                       <span class='hs-comment'>--</span>
<a name="line-27402"></a>                       <span class='hs-comment'>-- * __output_values__: 1-D.  The values of the output SparseTensor.</span>
<a name="line-27403"></a><span class='hs-definition'>sparseSparseMaximum</span> <span class='hs-varid'>a_indices</span> <span class='hs-varid'>a_values</span> <span class='hs-varid'>a_shape</span> <span class='hs-varid'>b_indices</span> <span class='hs-varid'>b_values</span>
<a name="line-27404"></a>                    <span class='hs-varid'>b_shape</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27405"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseSparseMaximum"</span>
<a name="line-27406"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-27407"></a>        <span class='hs-varid'>a_indices</span> <span class='hs-varid'>a_values</span> <span class='hs-varid'>a_shape</span> <span class='hs-varid'>b_indices</span> <span class='hs-varid'>b_values</span> <span class='hs-varid'>b_shape</span>
<a name="line-27408"></a><span class='hs-comment'>{-
<a name="line-27409"></a>attr {
<a name="line-27410"></a>  allowed_values {
<a name="line-27411"></a>    list {
<a name="line-27412"></a>      type: DT_FLOAT
<a name="line-27413"></a>      type: DT_DOUBLE
<a name="line-27414"></a>      type: DT_INT32
<a name="line-27415"></a>      type: DT_INT64
<a name="line-27416"></a>      type: DT_UINT8
<a name="line-27417"></a>      type: DT_INT16
<a name="line-27418"></a>      type: DT_INT8
<a name="line-27419"></a>      type: DT_UINT16
<a name="line-27420"></a>      type: DT_HALF
<a name="line-27421"></a>    }
<a name="line-27422"></a>  }
<a name="line-27423"></a>  name: "T"
<a name="line-27424"></a>  type: "type"
<a name="line-27425"></a>}
<a name="line-27426"></a>input_arg {
<a name="line-27427"></a>  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering."
<a name="line-27428"></a>  name: "a_indices"
<a name="line-27429"></a>  type: DT_INT64
<a name="line-27430"></a>}
<a name="line-27431"></a>input_arg {
<a name="line-27432"></a>  description: "1-D.  `N` non-empty values corresponding to `a_indices`."
<a name="line-27433"></a>  name: "a_values"
<a name="line-27434"></a>  type_attr: "T"
<a name="line-27435"></a>}
<a name="line-27436"></a>input_arg {
<a name="line-27437"></a>  description: "1-D.  Shape of the input SparseTensor."
<a name="line-27438"></a>  name: "a_shape"
<a name="line-27439"></a>  type: DT_INT64
<a name="line-27440"></a>}
<a name="line-27441"></a>input_arg {
<a name="line-27442"></a>  description: "counterpart to `a_indices` for the other operand."
<a name="line-27443"></a>  name: "b_indices"
<a name="line-27444"></a>  type: DT_INT64
<a name="line-27445"></a>}
<a name="line-27446"></a>input_arg {
<a name="line-27447"></a>  description: "counterpart to `a_values` for the other operand; must be of the same dtype."
<a name="line-27448"></a>  name: "b_values"
<a name="line-27449"></a>  type_attr: "T"
<a name="line-27450"></a>}
<a name="line-27451"></a>input_arg {
<a name="line-27452"></a>  description: "counterpart to `a_shape` for the other operand; the two shapes must be equal."
<a name="line-27453"></a>  name: "b_shape"
<a name="line-27454"></a>  type: DT_INT64
<a name="line-27455"></a>}
<a name="line-27456"></a>output_arg {
<a name="line-27457"></a>  description: "2-D.  The indices of the output SparseTensor."
<a name="line-27458"></a>  name: "output_indices"
<a name="line-27459"></a>  type: DT_INT64
<a name="line-27460"></a>}
<a name="line-27461"></a>output_arg {
<a name="line-27462"></a>  description: "1-D.  The values of the output SparseTensor."
<a name="line-27463"></a>  name: "output_values"
<a name="line-27464"></a>  type_attr: "T"
<a name="line-27465"></a>}
<a name="line-27466"></a>-}</span>
<a name="line-27467"></a>
<a name="line-27468"></a><a name="decodeGif"></a><span class='hs-comment'>-- | Decode the first frame of a GIF-encoded image to a uint8 tensor.</span>
<a name="line-27469"></a><span class='hs-comment'>--</span>
<a name="line-27470"></a><span class='hs-comment'>-- GIF with frame or transparency compression are not supported</span>
<a name="line-27471"></a><span class='hs-comment'>-- convert animated GIF from compressed to uncompressed by:</span>
<a name="line-27472"></a><span class='hs-comment'>-- </span>
<a name="line-27473"></a><span class='hs-comment'>-- convert $src.gif -coalesce $dst.gif</span>
<a name="line-27474"></a><span class='hs-definition'>decodeGif</span> <span class='hs-keyglyph'>::</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __contents__: 0-D.  The GIF-encoded image.</span>
<a name="line-27475"></a>             <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span> <span class='hs-comment'>-- ^ __image__: 4-D with shape `[num_frames, height, width, 3]`. RGB order</span>
<a name="line-27476"></a><span class='hs-definition'>decodeGif</span> <span class='hs-varid'>contents</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27477"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"DecodeGif"</span><span class='hs-layout'>)</span>
<a name="line-27478"></a>        <span class='hs-varid'>contents</span>
<a name="line-27479"></a><span class='hs-comment'>{-
<a name="line-27480"></a>input_arg {
<a name="line-27481"></a>  description: "0-D.  The GIF-encoded image."
<a name="line-27482"></a>  name: "contents"
<a name="line-27483"></a>  type: DT_STRING
<a name="line-27484"></a>}
<a name="line-27485"></a>output_arg {
<a name="line-27486"></a>  description: "4-D with shape `[num_frames, height, width, 3]`. RGB order"
<a name="line-27487"></a>  name: "image"
<a name="line-27488"></a>  type: DT_UINT8
<a name="line-27489"></a>}
<a name="line-27490"></a>-}</span>
<a name="line-27491"></a>
<a name="line-27492"></a><a name="substr"></a><span class='hs-comment'>-- | Return substrings from `Tensor` of strings.</span>
<a name="line-27493"></a><span class='hs-comment'>--</span>
<a name="line-27494"></a><span class='hs-comment'>-- For each string in the input `Tensor`, creates a substring starting at index</span>
<a name="line-27495"></a><span class='hs-comment'>-- `pos` with a total length of `len`.</span>
<a name="line-27496"></a><span class='hs-comment'>-- </span>
<a name="line-27497"></a><span class='hs-comment'>-- If `len` defines a substring that would extend beyond the length of the input</span>
<a name="line-27498"></a><span class='hs-comment'>-- string, then as many characters as possible are used.</span>
<a name="line-27499"></a><span class='hs-comment'>-- </span>
<a name="line-27500"></a><span class='hs-comment'>-- If `pos` is negative or specifies a character index larger than any of the input</span>
<a name="line-27501"></a><span class='hs-comment'>-- strings, then an `InvalidArgumentError` is thrown.</span>
<a name="line-27502"></a><span class='hs-comment'>-- </span>
<a name="line-27503"></a><span class='hs-comment'>-- `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on</span>
<a name="line-27504"></a><span class='hs-comment'>-- Op creation.</span>
<a name="line-27505"></a><span class='hs-comment'>-- </span>
<a name="line-27506"></a><span class='hs-comment'>-- *NOTE*: `Substr` supports broadcasting up to two dimensions. More about</span>
<a name="line-27507"></a><span class='hs-comment'>-- broadcasting</span>
<a name="line-27508"></a><span class='hs-comment'>-- [here](<a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)">http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)</a></span>
<a name="line-27509"></a><span class='hs-comment'>-- </span>
<a name="line-27510"></a><span class='hs-comment'>-- ---</span>
<a name="line-27511"></a><span class='hs-comment'>-- </span>
<a name="line-27512"></a><span class='hs-comment'>-- Examples</span>
<a name="line-27513"></a><span class='hs-comment'>-- </span>
<a name="line-27514"></a><span class='hs-comment'>-- Using scalar `pos` and `len`:</span>
<a name="line-27515"></a><span class='hs-comment'>-- </span>
<a name="line-27516"></a><span class='hs-comment'>-- ```</span>
<a name="line-27517"></a><span class='hs-comment'>-- input = [b'Hello', b'World']</span>
<a name="line-27518"></a><span class='hs-comment'>-- position = 1</span>
<a name="line-27519"></a><span class='hs-comment'>-- length = 3</span>
<a name="line-27520"></a><span class='hs-comment'>-- </span>
<a name="line-27521"></a><span class='hs-comment'>-- output = [b'ell', b'orl']</span>
<a name="line-27522"></a><span class='hs-comment'>-- ```</span>
<a name="line-27523"></a><span class='hs-comment'>-- </span>
<a name="line-27524"></a><span class='hs-comment'>-- Using `pos` and `len` with same shape as `input`:</span>
<a name="line-27525"></a><span class='hs-comment'>-- </span>
<a name="line-27526"></a><span class='hs-comment'>-- ```</span>
<a name="line-27527"></a><span class='hs-comment'>-- input = [[b'ten', b'eleven', b'twelve'],</span>
<a name="line-27528"></a><span class='hs-comment'>--          [b'thirteen', b'fourteen', b'fifteen'],</span>
<a name="line-27529"></a><span class='hs-comment'>--          [b'sixteen', b'seventeen', b'eighteen']]</span>
<a name="line-27530"></a><span class='hs-comment'>-- position = [[1, 2, 3],</span>
<a name="line-27531"></a><span class='hs-comment'>--             [1, 2, 3],</span>
<a name="line-27532"></a><span class='hs-comment'>--             [1, 2, 3]]</span>
<a name="line-27533"></a><span class='hs-comment'>-- length =   [[2, 3, 4],</span>
<a name="line-27534"></a><span class='hs-comment'>--             [4, 3, 2],</span>
<a name="line-27535"></a><span class='hs-comment'>--             [5, 5, 5]]</span>
<a name="line-27536"></a><span class='hs-comment'>-- </span>
<a name="line-27537"></a><span class='hs-comment'>-- output = [[b'en', b'eve', b'lve'],</span>
<a name="line-27538"></a><span class='hs-comment'>--           [b'hirt', b'urt', b'te'],</span>
<a name="line-27539"></a><span class='hs-comment'>--           [b'ixtee', b'vente', b'hteen']]</span>
<a name="line-27540"></a><span class='hs-comment'>-- ```</span>
<a name="line-27541"></a><span class='hs-comment'>-- </span>
<a name="line-27542"></a><span class='hs-comment'>-- Broadcasting `pos` and `len` onto `input`:</span>
<a name="line-27543"></a><span class='hs-comment'>-- </span>
<a name="line-27544"></a><span class='hs-comment'>-- ```</span>
<a name="line-27545"></a><span class='hs-comment'>-- input = [[b'ten', b'eleven', b'twelve'],</span>
<a name="line-27546"></a><span class='hs-comment'>--          [b'thirteen', b'fourteen', b'fifteen'],</span>
<a name="line-27547"></a><span class='hs-comment'>--          [b'sixteen', b'seventeen', b'eighteen'],</span>
<a name="line-27548"></a><span class='hs-comment'>--          [b'nineteen', b'twenty', b'twentyone']]</span>
<a name="line-27549"></a><span class='hs-comment'>-- position = [1, 2, 3]</span>
<a name="line-27550"></a><span class='hs-comment'>-- length =   [1, 2, 3]</span>
<a name="line-27551"></a><span class='hs-comment'>-- </span>
<a name="line-27552"></a><span class='hs-comment'>-- output = [[b'e', b'ev', b'lve'],</span>
<a name="line-27553"></a><span class='hs-comment'>--           [b'h', b'ur', b'tee'],</span>
<a name="line-27554"></a><span class='hs-comment'>--           [b'i', b've', b'hte'],</span>
<a name="line-27555"></a><span class='hs-comment'>--           [b'i', b'en', b'nty']]</span>
<a name="line-27556"></a><span class='hs-comment'>-- ```</span>
<a name="line-27557"></a><span class='hs-comment'>-- </span>
<a name="line-27558"></a><span class='hs-comment'>-- Broadcasting `input` onto `pos` and `len`:</span>
<a name="line-27559"></a><span class='hs-comment'>-- </span>
<a name="line-27560"></a><span class='hs-comment'>-- ```</span>
<a name="line-27561"></a><span class='hs-comment'>-- input = b'thirteen'</span>
<a name="line-27562"></a><span class='hs-comment'>-- position = [1, 5, 7]</span>
<a name="line-27563"></a><span class='hs-comment'>-- length =   [3, 2, 1]</span>
<a name="line-27564"></a><span class='hs-comment'>-- </span>
<a name="line-27565"></a><span class='hs-comment'>-- output = [b'hir', b'ee', b'n"]</span>
<a name="line-27566"></a><span class='hs-comment'>-- ```</span>
<a name="line-27567"></a><span class='hs-definition'>substr</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span> <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-27568"></a>                                                     <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27569"></a>          <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __input__: Tensor of strings</span>
<a name="line-27570"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __pos__: Scalar defining the position of first character in each substring</span>
<a name="line-27571"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __len__: Scalar defining the number of characters to include in each substring</span>
<a name="line-27572"></a>          <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __output__: Tensor of substrings</span>
<a name="line-27573"></a><span class='hs-definition'>substr</span> <span class='hs-varid'>input</span> <span class='hs-varid'>pos</span> <span class='hs-varid'>len</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27574"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"Substr"</span>
<a name="line-27575"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-27576"></a>        <span class='hs-varid'>input</span> <span class='hs-varid'>pos</span> <span class='hs-varid'>len</span>
<a name="line-27577"></a><span class='hs-comment'>{-
<a name="line-27578"></a>attr {
<a name="line-27579"></a>  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
<a name="line-27580"></a>  name: "T"
<a name="line-27581"></a>  type: "type"
<a name="line-27582"></a>}
<a name="line-27583"></a>input_arg {
<a name="line-27584"></a>  description: "Tensor of strings" name: "input" type: DT_STRING
<a name="line-27585"></a>}
<a name="line-27586"></a>input_arg {
<a name="line-27587"></a>  description: "Scalar defining the position of first character in each substring"
<a name="line-27588"></a>  name: "pos"
<a name="line-27589"></a>  type_attr: "T"
<a name="line-27590"></a>}
<a name="line-27591"></a>input_arg {
<a name="line-27592"></a>  description: "Scalar defining the number of characters to include in each substring"
<a name="line-27593"></a>  name: "len"
<a name="line-27594"></a>  type_attr: "T"
<a name="line-27595"></a>}
<a name="line-27596"></a>output_arg {
<a name="line-27597"></a>  description: "Tensor of substrings" name: "output" type: DT_STRING
<a name="line-27598"></a>}
<a name="line-27599"></a>-}</span>
<a name="line-27600"></a>
<a name="line-27601"></a><a name="lookupTableInsert"></a><span class='hs-comment'>-- | Updates the table to associates keys with values.</span>
<a name="line-27602"></a><span class='hs-comment'>--</span>
<a name="line-27603"></a><span class='hs-comment'>-- The tensor `keys` must be of the same type as the keys of the table.</span>
<a name="line-27604"></a><span class='hs-comment'>-- The tensor `values` must be of the type of the table values.</span>
<a name="line-27605"></a><span class='hs-definition'>lookupTableInsert</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tin</span> <span class='hs-varid'>tout</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tin</span><span class='hs-layout'>,</span>
<a name="line-27606"></a>                                              <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27607"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __table_handle__: Handle to the table.</span>
<a name="line-27608"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tin</span> <span class='hs-comment'>-- ^ __keys__: Any shape.  Keys to look up.</span>
<a name="line-27609"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tout</span> <span class='hs-comment'>-- ^ __values__: Values to associate with keys.</span>
<a name="line-27610"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-27611"></a><span class='hs-definition'>lookupTableInsert</span> <span class='hs-varid'>table_handle</span> <span class='hs-varid'>keys</span> <span class='hs-varid'>values</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27612"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LookupTableInsert"</span>
<a name="line-27613"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tin"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tin</span><span class='hs-layout'>)</span>
<a name="line-27614"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tout"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-27615"></a>        <span class='hs-varid'>table_handle</span> <span class='hs-varid'>keys</span> <span class='hs-varid'>values</span>
<a name="line-27616"></a><span class='hs-comment'>{-
<a name="line-27617"></a>attr { name: "Tin" type: "type" }
<a name="line-27618"></a>attr { name: "Tout" type: "type" }
<a name="line-27619"></a>input_arg {
<a name="line-27620"></a>  description: "Handle to the table."
<a name="line-27621"></a>  is_ref: true
<a name="line-27622"></a>  name: "table_handle"
<a name="line-27623"></a>  type: DT_STRING
<a name="line-27624"></a>}
<a name="line-27625"></a>input_arg {
<a name="line-27626"></a>  description: "Any shape.  Keys to look up."
<a name="line-27627"></a>  name: "keys"
<a name="line-27628"></a>  type_attr: "Tin"
<a name="line-27629"></a>}
<a name="line-27630"></a>input_arg {
<a name="line-27631"></a>  description: "Values to associate with keys."
<a name="line-27632"></a>  name: "values"
<a name="line-27633"></a>  type_attr: "Tout"
<a name="line-27634"></a>}
<a name="line-27635"></a>-}</span>
<a name="line-27636"></a>
<a name="line-27637"></a><a name="sparseDenseCwiseDiv"></a><span class='hs-comment'>-- | Component-wise divides a SparseTensor by a dense Tensor.</span>
<a name="line-27638"></a><span class='hs-comment'>--</span>
<a name="line-27639"></a><span class='hs-comment'>-- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not</span>
<a name="line-27640"></a><span class='hs-comment'>-- the other direction.</span>
<a name="line-27641"></a><span class='hs-definition'>sparseDenseCwiseDiv</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v1</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>t</span><span class='hs-layout'>,</span>
<a name="line-27642"></a>                                               <span class='hs-conid'>OneOf</span> <span class='hs-chr'>'</span><span class='hs-keyglyph'>[</span><span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Double</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-27643"></a>                                                       <span class='hs-layout'>(</span><span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span><span class='hs-varop'>.</span><span class='hs-conid'>Complex</span> <span class='hs-conid'>Float</span><span class='hs-layout'>)</span><span class='hs-layout'>,</span>
<a name="line-27644"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int16</span><span class='hs-layout'>,</span>
<a name="line-27645"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int32</span><span class='hs-layout'>,</span>
<a name="line-27646"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span><span class='hs-layout'>,</span>
<a name="line-27647"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int8</span><span class='hs-layout'>,</span>
<a name="line-27648"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word16</span><span class='hs-layout'>,</span>
<a name="line-27649"></a>                                                       <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Word</span><span class='hs-varop'>.</span><span class='hs-conid'>Word8</span><span class='hs-layout'>,</span> <span class='hs-conid'>Double</span><span class='hs-layout'>,</span>
<a name="line-27650"></a>                                                       <span class='hs-conid'>Float</span><span class='hs-keyglyph'>]</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27651"></a>                       <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v1</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a</span>
<a name="line-27652"></a>                                                <span class='hs-comment'>-- SparseTensor, possibly not in canonical ordering.</span>
<a name="line-27653"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.</span>
<a name="line-27654"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>Int</span><span class='hs-varop'>.</span><span class='hs-conid'>Int64</span> <span class='hs-comment'>-- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.</span>
<a name="line-27655"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v4</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __dense__: `R`-D.  The dense Tensor operand.</span>
<a name="line-27656"></a>                       <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Value</span> <span class='hs-varid'>t</span> <span class='hs-comment'>-- ^ __output__: 1-D.  The `N` values that are operated on.</span>
<a name="line-27657"></a><span class='hs-definition'>sparseDenseCwiseDiv</span> <span class='hs-varid'>sp_indices</span> <span class='hs-varid'>sp_values</span> <span class='hs-varid'>sp_shape</span> <span class='hs-varid'>dense</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27658"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"SparseDenseCwiseDiv"</span>
<a name="line-27659"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"T"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>t</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-27660"></a>        <span class='hs-varid'>sp_indices</span> <span class='hs-varid'>sp_values</span> <span class='hs-varid'>sp_shape</span> <span class='hs-varid'>dense</span>
<a name="line-27661"></a><span class='hs-comment'>{-
<a name="line-27662"></a>attr {
<a name="line-27663"></a>  allowed_values {
<a name="line-27664"></a>    list {
<a name="line-27665"></a>      type: DT_FLOAT
<a name="line-27666"></a>      type: DT_DOUBLE
<a name="line-27667"></a>      type: DT_INT64
<a name="line-27668"></a>      type: DT_INT32
<a name="line-27669"></a>      type: DT_UINT8
<a name="line-27670"></a>      type: DT_UINT16
<a name="line-27671"></a>      type: DT_INT16
<a name="line-27672"></a>      type: DT_INT8
<a name="line-27673"></a>      type: DT_COMPLEX64
<a name="line-27674"></a>      type: DT_COMPLEX128
<a name="line-27675"></a>      type: DT_QINT8
<a name="line-27676"></a>      type: DT_QUINT8
<a name="line-27677"></a>      type: DT_QINT32
<a name="line-27678"></a>      type: DT_HALF
<a name="line-27679"></a>    }
<a name="line-27680"></a>  }
<a name="line-27681"></a>  name: "T"
<a name="line-27682"></a>  type: "type"
<a name="line-27683"></a>}
<a name="line-27684"></a>input_arg {
<a name="line-27685"></a>  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
<a name="line-27686"></a>  name: "sp_indices"
<a name="line-27687"></a>  type: DT_INT64
<a name="line-27688"></a>}
<a name="line-27689"></a>input_arg {
<a name="line-27690"></a>  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
<a name="line-27691"></a>  name: "sp_values"
<a name="line-27692"></a>  type_attr: "T"
<a name="line-27693"></a>}
<a name="line-27694"></a>input_arg {
<a name="line-27695"></a>  description: "1-D.  Shape of the input SparseTensor."
<a name="line-27696"></a>  name: "sp_shape"
<a name="line-27697"></a>  type: DT_INT64
<a name="line-27698"></a>}
<a name="line-27699"></a>input_arg {
<a name="line-27700"></a>  description: "`R`-D.  The dense Tensor operand."
<a name="line-27701"></a>  name: "dense"
<a name="line-27702"></a>  type_attr: "T"
<a name="line-27703"></a>}
<a name="line-27704"></a>output_arg {
<a name="line-27705"></a>  description: "1-D.  The `N` values that are operated on."
<a name="line-27706"></a>  name: "output"
<a name="line-27707"></a>  type_attr: "T"
<a name="line-27708"></a>}
<a name="line-27709"></a>-}</span>
<a name="line-27710"></a>
<a name="line-27711"></a><a name="lookupTableImport"></a><span class='hs-comment'>-- | Replaces the contents of the table with the specified keys and values.</span>
<a name="line-27712"></a><span class='hs-comment'>--</span>
<a name="line-27713"></a><span class='hs-comment'>-- The tensor `keys` must be of the same type as the keys of the table.</span>
<a name="line-27714"></a><span class='hs-comment'>-- The tensor `values` must be of the type of the table values.</span>
<a name="line-27715"></a><span class='hs-definition'>lookupTableImport</span> <span class='hs-keyglyph'>::</span> <span class='hs-keyword'>forall</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tin</span> <span class='hs-varid'>tout</span> <span class='hs-varop'>.</span> <span class='hs-layout'>(</span><span class='hs-conid'>TensorType</span> <span class='hs-varid'>tin</span><span class='hs-layout'>,</span>
<a name="line-27716"></a>                                              <span class='hs-conid'>TensorType</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span> <span class='hs-keyglyph'>=&gt;</span>
<a name="line-27717"></a>                     <span class='hs-conid'>Tensor</span> <span class='hs-conid'>Ref</span> <span class='hs-conid'>Data</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span><span class='hs-varop'>.</span><span class='hs-conid'>ByteString</span> <span class='hs-comment'>-- ^ __table_handle__: Handle to the table.</span>
<a name="line-27718"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v2</span> <span class='hs-varid'>tin</span> <span class='hs-comment'>-- ^ __keys__: Any shape.  Keys to look up.</span>
<a name="line-27719"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Tensor</span> <span class='hs-varid'>v3</span> <span class='hs-varid'>tout</span> <span class='hs-comment'>-- ^ __values__: Values to associate with keys.</span>
<a name="line-27720"></a>                     <span class='hs-keyglyph'>-&gt;</span> <span class='hs-conid'>Build</span> <span class='hs-layout'>(</span><span class='hs-conid'>ControlNode</span><span class='hs-layout'>)</span>
<a name="line-27721"></a><span class='hs-definition'>lookupTableImport</span> <span class='hs-varid'>table_handle</span> <span class='hs-varid'>keys</span> <span class='hs-varid'>values</span> <span class='hs-keyglyph'>|</span> <span class='hs-varid'>eqLengthGuard</span> <span class='hs-conid'>[]</span> <span class='hs-keyglyph'>=</span>
<a name="line-27722"></a>    <span class='hs-varid'>buildOp</span> <span class='hs-layout'>(</span><span class='hs-varid'>opDef</span> <span class='hs-str'>"LookupTableImport"</span>
<a name="line-27723"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tin"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tin</span><span class='hs-layout'>)</span>
<a name="line-27724"></a>             <span class='hs-varop'>&amp;</span> <span class='hs-varid'>opAttr</span> <span class='hs-str'>"Tout"</span> <span class='hs-varop'>.~</span> <span class='hs-varid'>tensorType</span> <span class='hs-layout'>(</span><span class='hs-varid'>undefined</span> <span class='hs-keyglyph'>::</span> <span class='hs-varid'>tout</span><span class='hs-layout'>)</span><span class='hs-layout'>)</span>
<a name="line-27725"></a>        <span class='hs-varid'>table_handle</span> <span class='hs-varid'>keys</span> <span class='hs-varid'>values</span>
<a name="line-27726"></a><span class='hs-comment'>{-
<a name="line-27727"></a>attr { name: "Tin" type: "type" }
<a name="line-27728"></a>attr { name: "Tout" type: "type" }
<a name="line-27729"></a>input_arg {
<a name="line-27730"></a>  description: "Handle to the table."
<a name="line-27731"></a>  is_ref: true
<a name="line-27732"></a>  name: "table_handle"
<a name="line-27733"></a>  type: DT_STRING
<a name="line-27734"></a>}
<a name="line-27735"></a>input_arg {
<a name="line-27736"></a>  description: "Any shape.  Keys to look up."
<a name="line-27737"></a>  name: "keys"
<a name="line-27738"></a>  type_attr: "Tin"
<a name="line-27739"></a>}
<a name="line-27740"></a>input_arg {
<a name="line-27741"></a>  description: "Values to associate with keys."
<a name="line-27742"></a>  name: "values"
<a name="line-27743"></a>  type_attr: "Tout"
<a name="line-27744"></a>}
<a name="line-27745"></a>-}</span>
</pre></body>
</html>