Example #1
def train(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
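For context, here is a minimal sketch of how a train() helper like the one above is typically driven. The toy regression model, the random NumPy batches, and the assumption that FLAGS.learning_rate and FLAGS.debug are already defined are all illustrative, not part of the example:

import numpy as np
import tensorflow as tf

# Hypothetical model: a single dense layer fit to random targets.
x = tf.placeholder(tf.float32, [None, 10])
y = tf.placeholder(tf.float32, [None, 1])
pred = tf.layers.dense(x, 1)
loss = tf.reduce_mean(tf.square(pred - y))

# train() is the helper defined above; it builds the Adam update op.
train_op = train(loss, tf.trainable_variables())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(100):
        batch_x = np.random.rand(32, 10).astype(np.float32)
        batch_y = np.random.rand(32, 1).astype(np.float32)
        sess.run(train_op, feed_dict={x: batch_x, y: batch_y})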
Example #2
def train(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
Example #3
    def build_labels_optimizer(self, loss_val, var_list):
        with tf.variable_scope("labels"):
            labels_optimizer = tf.train.AdamOptimizer(self.labels_learning_rate)
            labels_grads = labels_optimizer.compute_gradients(loss_val, var_list=var_list)
            if self.debug:
                # print(len(var_list))
                for grad, var in labels_grads:
                    utils.add_gradient_summary(grad, var)
            return labels_optimizer.apply_gradients(labels_grads)
Example #4
    def train_optimizer(self):
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.var_list = tf.trainable_variables()
        #self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        self.grads = optimizer.compute_gradients(self.loss, var_list=self.var_list)
        if cfgs.debug:
            # print(len(var_list))
            for grad, var in self.grads:
                utils.add_gradient_summary(grad, var)

        self.train_op = optimizer.apply_gradients(self.grads)
Example #5
def train(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    # First half of minimize(): compute the gradients of loss_val with respect
    # to the variables in var_list; returns a list of (gradient, variable) tuples.
    grads_and_vars = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads_and_vars:
            utils.add_gradient_summary(grad, var)
    # Second half of minimize(): apply the computed gradients to the variables,
    # returning an Operation that applies them (and increments global_step when
    # one is passed).
    return optimizer.apply_gradients(grads_and_vars)
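As the comments note, compute_gradients() followed by apply_gradients() is what minimize() does in two steps; the split is only needed when the gradients must be inspected, summarized, or clipped in between. A minimal sketch of the single-call equivalent when no such hook is needed:

def train(loss_val, var_list):
    # minimize() runs compute_gradients() and apply_gradients() internally,
    # but offers no place to attach the gradient summaries used above.
    return tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
        loss_val, var_list=var_list)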
Example #6
def train(loss_val, var_list):
    learning_rate = tf.train.exponential_decay(1e-4,
                                               MAX_ITERATION,
                                               100,
                                               0.96,
                                               staircase=True)
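    # Note: the second argument of exponential_decay is the current global step;
    # passing the constant MAX_ITERATION here yields a fixed learning rate (the
    # value the schedule would reach after MAX_ITERATION steps) rather than one
    # that decays as training progresses.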
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
Example #7
def train(loss_val, var_list):
    """
    :param loss_val:  损失函数
    :param var_list:  需要优化的值
    :return:
    """
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)  # return the op that applies the gradient updates
Example #8
def train(loss_val, var_list):
    print('===FCN==train==')
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    print("=======FLAGS.learning_rate=======")
    print(FLAGS.learning_rate)
    print("=======optimizer=======")
    print(optimizer)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    print("=======grads==loss_val=====")
    print(loss_val)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
Example #9
    def train(cls, loss_val, var_list, flags):
        """
        Create train_op and learning_rate.
        """

        learning_rate = tf.Variable(flags.learning_rate, trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        # optimizer = tf.train.RMSPropOptimizer(learning_rate)
        grads = optimizer.compute_gradients(loss_val, var_list=var_list)
        if flags.debug:
            # print(len(var_list))
            for grad, var in grads:
                utils.add_gradient_summary(grad, var)
        train_op = optimizer.apply_gradients(grads)
        return learning_rate, train_op
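Because learning_rate is returned as a non-trainable tf.Variable and the optimizer receives the variable itself rather than a Python constant, it can be lowered while training runs. A minimal usage sketch; ModelClass, the step counts, and the 10x decay factor are hypothetical, and the loss is assumed not to need a feed_dict:

lr, train_op = ModelClass.train(loss, tf.trainable_variables(), flags)
lr_decay_op = tf.assign(lr, lr * 0.1)  # op that cuts the learning rate 10x

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):
        sess.run(train_op)
        if step == 5000:
            sess.run(lr_decay_op)  # the new value is used on subsequent steps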
Example #10
def train(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    ## Per the TF API docs:
    ## Compute gradients of loss_val for the variables in var_list.
    ## This is the first part of minimize().
    ## loss: A Tensor containing the value to minimize.
    ## var_list: Optional list of tf.Variable to update to minimize loss.
    ##   Defaults to the list of variables collected in the graph under the key GraphKeys.TRAINABLE_VARIABLES.
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    ## Per the TF API docs:
    ## Apply gradients to variables.
    ## This is the second part of minimize(). It returns an Operation that applies gradients.
    return optimizer.apply_gradients(grads)
Example #11
def train(loss, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate, beta1=FLAGS.beta1)
    grads = optimizer.compute_gradients(loss, var_list=var_list)
    for grad, var in grads:
        utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
Example #12
def train(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate, beta1=FLAGS.beta1)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    for grad, var in grads:
        utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
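Most of the examples on this page rely on a module-level FLAGS object. A minimal sketch of the flag definitions they assume (the names come from the examples; the default values are placeholders):

import tensorflow as tf

tf.app.flags.DEFINE_float("learning_rate", 1e-4, "Adam learning rate")
tf.app.flags.DEFINE_float("beta1", 0.9, "Adam beta1, used by the last two examples")
tf.app.flags.DEFINE_bool("debug", False, "If True, add gradient summaries")
FLAGS = tf.app.flags.FLAGS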