def _train(self, loss_val, var_list, optimizer):
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    for grad, var in grads:
        utils.add_gradient_summary(grad, var, collections=self.summary_collections)
    return optimizer.apply_gradients(grads)
def train(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
def _train(self, loss_val, var_list, optimizer):
    print("train variables are")
    for v in var_list:
        print(v.op.name, v.get_shape())
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    for grad, var in grads:
        utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
def train(loss_val, var_list): """ 定义采用那种算法的优化器,然后计算loss函数的梯度值,并加入到summary中 Parameters ---------- loss_val: 计算的loss值 var_list: 需要计算梯度的变量 """ # 采用Adam算法的优化器 optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate) # 以下可以合并成一步minimize grads = optimizer.compute_gradients(loss_val, var_list = var_list) if FLAGS.debug: # print(len(var_list)) for grad, var in grads: utils.add_gradient_summary(grad, var) return optimizer.apply_gradients(grads)
def train(loss, var_list):
    optimizer = tf.train.AdamOptimizer(0.0001)
    grads = optimizer.compute_gradients(loss, var_list=var_list)
    for grad, var in grads:
        utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
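For reference, here is a minimal sketch of how one of these train() helpers is typically wired into a TensorFlow 1.x graph and session loop. The toy regression graph and the local add_gradient_summary stand-in (replacing utils.add_gradient_summary) are assumptions made for illustration and are not part of the snippets above.

import numpy as np
import tensorflow as tf


def add_gradient_summary(grad, var):
    # Stand-in (assumed) for utils.add_gradient_summary in the snippets above:
    # record a histogram of each gradient for TensorBoard.
    if grad is not None:
        tf.summary.histogram(var.op.name + "/gradient", grad)


def train(loss_val, var_list):
    # Same structure as the snippets above: Adam optimizer, with explicit
    # compute_gradients / apply_gradients instead of a single minimize()
    # so each gradient can be logged before the update is applied.
    optimizer = tf.train.AdamOptimizer(1e-4)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    for grad, var in grads:
        add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)


# Toy graph standing in for the real model: fit y = w * x.
x = tf.placeholder(tf.float32, shape=[None], name="x")
y = tf.placeholder(tf.float32, shape=[None], name="y")
w = tf.Variable(0.0, name="w")
loss = tf.reduce_mean(tf.square(w * x - y))

# Typical wiring: collect the trainable variables and build the train op.
trainable_var = tf.trainable_variables()
train_op = train(loss, trainable_var)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    xs = np.linspace(0.0, 1.0, 32).astype(np.float32)
    ys = 3.0 * xs
    for step in range(100):
        sess.run(train_op, feed_dict={x: xs, y: ys})

Keeping compute_gradients and apply_gradients separate is what allows the gradient summaries (and the optional FLAGS.debug printing in some variants) to be inserted between the two steps; otherwise a single minimize() call would suffice.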