def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with num_units cells.

    Identical to the stock TF GRUCell except that the reset gate, update
    gate, and candidate activations are stored on ``self.activations`` so
    they can be fetched after running the graph.

    Args:
        inputs: input tensor for the current time step.
        state: previous hidden state of the cell.
        scope: optional variable scope name (defaults to the class name).

    Returns:
        A ``(output, new_state)`` pair; for a GRU both are the new hidden
        state ``new_h``.
    """
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
        with vs.variable_scope("Gates"):  # reset gate and update gate
            # Start with a bias of 1.0 so the cell neither resets nor
            # updates until it learns to.
            r, u = array_ops.split(
                1, 2, linear([inputs, state], 2 * self._num_units, True, 1.0))
            r, u = sigmoid(r), sigmoid(u)
        with vs.variable_scope("Candidate"):
            c = tanh(linear([inputs, r * state], self._num_units, True))
        new_h = u * state + (1 - u) * c
        # Store the activations; everything else matches the stock cell.
        self.activations = [r, u, c]
    return new_h, new_h
在此之后，我按下面的方式拼接这些激活值，然后在调用此 GRU 单元的脚本中获取它们：
@property
def activations(self):
    """Flattened tensor of the [r, u, c] activations from the last call."""
    return self._activations

@activations.setter
def activations(self, activations_array):
    """Concatenate the [r, u, c] activation list and store it flattened."""
    print("PRINT THIS")
    # Concatenate the gate/candidate tensors into one tensor, then
    # flatten to rank 1 so it can be fetched as a single value.
    concactivations = tf.concat(
        concat_dim=0, values=activations_array, name='concat_activations')
    self._activations = tf.reshape(
        tensor=concactivations, shape=[-1], name='flatten_activations')
我按下面的方式调用 GRU 单元：
outputs, state = rnn.rnn(cell=cell, inputs=x, initial_state=initial_state, sequence_length=s)
其中 s 是一个序列长度数组，记录了输入批次中每个样本的时间步数。
最后我拿到了
fetched = sess.run(fetches=cell.activations, feed_dict=feed_dict)
执行时我收到以下错误
Traceback (most recent call last):
  File "xxx.py", line 162, in <module>
    fetched = sess.run(fetches=cell.activations, feed_dict=feed_dict)
  File "/xxx/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 315, in run
    return self._run(None, fetches, feed_dict)
  File "/xxx/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 511, in _run
    feed_dict_string)
  File "/xxx/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 564, in _do_run
    target_list)
  File "/xxx/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 588, in _do_call
    six.reraise(e_type, e_value, e_traceback)
  File "/xxx/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 571, in _do_call
    return fn(*args)
  File "/xxx/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 555, in _run_fn
    return tf_session.TF_Run(session, feed_dict, fetch_list, target_list)
tensorflow.python.pywrap_tensorflow.StatusNotOK: Invalid argument: Returned tensor for RNN/cond_396/ClusterableGRUCell/flatten_activations:0 was not valid.
有人知道在传入可变长度序列时，如何在最后一个时间步从 GRU 单元获取激活值吗？谢谢。
解决方法：要获取最后一个时间步的激活值，需要把激活作为状态（state）的一部分，由 tf.rnn 返回。 以上是内存溢出为你收集整理的《python – 获取具有可变序列长度的激活时的 Tensorflow GRU 单元错误》的全部内容，希望本文能够帮你解决所遇到的程序开发问题。
如果觉得内存溢出网站内容还不错,欢迎将内存溢出网站推荐给程序员好友。
欢迎分享,转载请注明来源:内存溢出
评论列表(0条)