Example #1
    def build_compute_loss(self, att_logits, labels):
        # Per-example softmax cross-entropy over the attention logits
        loss_vec = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=att_logits, labels=labels)
        loss_cls = tf.reduce_mean(loss_vec)
        # L2 weight decay on the selected variables
        reg_var_list = self.get_variables_by_name([""], False)
        loss_reg = loss_func.l2_regularization_loss(reg_var_list[""], self.weight_decay)
        loss = loss_cls + loss_reg
        return loss, loss_vec
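
Every example on this page delegates the weight-decay term to an `l2_regularization_loss(var_list, weight_decay)` helper from a project-local `loss` / `loss_func` module whose source is not shown here. A minimal sketch consistent with these call sites (an assumption, not the verbatim project code):

import tensorflow as tf

def l2_regularization_loss(variables, weight_decay):
    # Sum the L2 norms of all given variables and scale by the decay factor
    l2_losses = [tf.nn.l2_loss(var) for var in variables]
    return weight_decay * tf.add_n(l2_losses)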
Example #2
    def train_op(self):
        # Collect variables for training
        tvars = [
            var for var in tf.trainable_variables()
            if var.op.name.startswith('refer_seg')
        ]
        print('Collecting variables for training:')
        for var in tvars:
            print('\t%s' % var.name)
        print('Done.')

        # Collect variables for regularization
        rvars = [
            var for var in tf.trainable_variables()
            if var.op.name.startswith('refer_seg')
        ]
        print('Collecting variables for regularization:')
        for var in rvars:
            print('\t%s' % var.name)
        print('Done.')

        # Define loss
        self.target_coarse = tf.image.resize_bilinear(self.target_fine,
                                                      [self.vf_h, self.vf_w])
        self.cls_loss = loss.weighed_logistic_loss(self.pred, self.target_fine,
                                                   1, 1)
        self.reg_loss = loss.l2_regularization_loss(rvars, self.weight_decay)
        self.sum_loss = self.cls_loss + self.reg_loss

        # Define learning rate
        self.global_step = tf.Variable(0, trainable=False)
        self.learning_rate = tf.train.polynomial_decay(self.start_lr,
                                                       self.global_step,
                                                       self.lr_decay_step,
                                                       self.end_lr,
                                                       self.lr_decay_rate)

        # Define optimization process
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        grads_and_vars = optimizer.compute_gradients(self.sum_loss,
                                                     var_list=tvars)

        var_lr_mult = {}
        for var in tvars:
            var_lr_mult[var] = 2.0 if var.op.name.find('biases') > 0 else 1.0
        print('Setting variable learning rate multiplication:')
        for var in tvars:
            print('\t%s: %f' % (var.name, var_lr_mult[var]))
        print('Done.')

        grads_and_vars = [
            ((g if var_lr_mult[v] == 1 else tf.multiply(var_lr_mult[v], g)), v)
            for g, v in grads_and_vars
        ]
        self.train_step = optimizer.apply_gradients(
            grads_and_vars, global_step=self.global_step)
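
The `weighed_logistic_loss(scores, labels, pos_mult, neg_mult)` calls above suggest a per-pixel sigmoid cross-entropy with separate weights for positive and negative labels. A plausible sketch under that assumption (not necessarily the project's actual implementation):

import tensorflow as tf

def weighed_logistic_loss(scores, labels, pos_loss_mult=1.0, neg_loss_mult=1.0):
    # Weight positive pixels by pos_loss_mult and negative pixels by neg_loss_mult
    loss_mult = labels * (pos_loss_mult - neg_loss_mult) + neg_loss_mult
    per_pixel = tf.nn.sigmoid_cross_entropy_with_logits(logits=scores, labels=labels)
    return tf.reduce_mean(per_pixel * loss_mult)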
Example #3
    def train_op(self):
        if self.conv5:
            tvars = [var for var in tf.trainable_variables() if var.op.name.startswith('text_objseg')
                     or var.name.startswith('res5') or var.name.startswith('res4')
                     or var.name.startswith('res3')]
        else:
            tvars = [var for var in tf.trainable_variables() if var.op.name.startswith('text_objseg')]
        reg_var_list = [var for var in tvars if var.op.name.find(r'DW') > 0 or var.name[-9:-2] == 'weights']
        print('Collecting variables for regularization:')
        for var in reg_var_list: print('\t%s' % var.name)
        print('Done.')

        # define loss
        self.target = tf.image.resize_bilinear(self.target_fine, [self.vf_h, self.vf_w])
        self.cls_loss_c5 = loss.weighed_logistic_loss(self.up_c5, self.target_fine, 1, 1)
        self.cls_loss_c4 = loss.weighed_logistic_loss(self.up_c4, self.target_fine, 1, 1)
        self.cls_loss_c3 = loss.weighed_logistic_loss(self.up_c3, self.target_fine, 1, 1)
        self.cls_loss = loss.weighed_logistic_loss(self.up, self.target_fine, 1, 1)
        self.cls_loss_all = 0.7 * self.cls_loss + 0.1 * self.cls_loss_c5 \
                            + 0.1 * self.cls_loss_c4 + 0.1 * self.cls_loss_c3
        self.reg_loss = loss.l2_regularization_loss(reg_var_list, self.weight_decay)
        self.cost = self.cls_loss_all + self.reg_loss

        # learning rate
        lr = tf.Variable(0.0, trainable=False)  # despite the name, this is the global step fed to the decay schedule
        self.learning_rate = tf.train.polynomial_decay(self.start_lr, lr, self.lr_decay_step, end_learning_rate=0.00001,
                                                       power=0.9)

        # optimizer
        if self.optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer(self.learning_rate)
        else:
            raise ValueError("Unknown optimizer type %s!" % self.optimizer)

        # learning rate multiplier
        grads_and_vars = optimizer.compute_gradients(self.cost, var_list=tvars)
        var_lr_mult = {}
        for var in tvars:
            if var.op.name.find(r'biases') > 0:
                var_lr_mult[var] = 2.0
            elif var.name.startswith('res5') or var.name.startswith('res4') or var.name.startswith('res3'):
                var_lr_mult[var] = 1.0
            else:
                var_lr_mult[var] = 1.0
        print('Variable learning rate multiplication:')
        for var in tvars:
            print('\t%s: %f' % (var.name, var_lr_mult[var]))
        print('Done.')
        grads_and_vars = [((g if var_lr_mult[v] == 1 else tf.multiply(var_lr_mult[v], g)), v) for g, v in
                          grads_and_vars]

        # training step
        self.train_step = optimizer.apply_gradients(grads_and_vars, global_step=lr)
Example #4
    def train_op(self):
        # define loss
        self.target = tf.image.resize_bilinear(self.target_fine,
                                               [self.vf_h, self.vf_w])
        tvars = [
            var for var in tf.trainable_variables()
            if var.op.name.startswith('text_objseg')
            or var.op.name.startswith('ResNet/fc1000')
        ]
        reg_var_list = [var for var in tvars if var.op.name.find(r'DW') > 0]
        self.mid_loss_5 = loss.weighed_logistic_loss(self.up1_5, self.target)
        self.mid_loss_4 = loss.weighed_logistic_loss(self.up1_4, self.target)
        self.mid_loss_3 = loss.weighed_logistic_loss(self.up1_3, self.target)

        self.cls_loss = loss.weighed_logistic_loss(self.pred, self.target)
        self.reg_loss = loss.l2_regularization_loss(reg_var_list,
                                                    self.weight_decay)
        self.cost = self.cls_loss + self.reg_loss + self.mid_loss_5 + self.mid_loss_4 + self.mid_loss_3

        # learning rate
        lr = tf.Variable(0.0, trainable=False)  # despite the name, this is the global step fed to the decay schedule
        self.learning_rate = tf.train.polynomial_decay(
            self.start_lr,
            lr,
            self.lr_decay_step,
            end_learning_rate=0.00001,
            power=0.9)

        # optimizer
        if self.optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer(self.learning_rate)
        else:
            raise ValueError("Unknown optimizer type %s!" % self.optimizer)

        # learning rate multiplier
        grads_and_vars = optimizer.compute_gradients(self.cost, var_list=tvars)
        var_lr_mult = {
            var: (2.0 if var.op.name.find(r'biases') > 0 else 1.0)
            for var in tvars
        }
        grads_and_vars = [
            ((g if var_lr_mult[v] == 1 else tf.multiply(var_lr_mult[v], g)), v)
            for g, v in grads_and_vars
        ]

        # training step
        self.train_step = optimizer.apply_gradients(grads_and_vars,
                                                    global_step=lr)
Example #5
    def build_model(self):
        self.imcrop_batch = tf.placeholder(
            tf.float32, [self.batch_size, self.input_H, self.input_W, 3])
        self.text_seq_batch = tf.placeholder(tf.int32,
                                             [self.rnn_cells, self.batch_size])
        self.label_batch = tf.placeholder(
            tf.float32, [self.batch_size, self.input_H, self.input_W, 1])
        self.scores = self.forward(self.imcrop_batch, self.text_seq_batch)

        self.train_var_list = self.get_train_var_list()

        # Calculate loss
        self.cls_loss = loss.weighed_logistic_loss(self.scores,
                                                   self.label_batch)

        # Add regularization to weight matrices (excluding bias)
        reg_var_list = [
            var for var in tf.trainable_variables()
            if (var in self.train_var_list) and (
                var.name[-9:-2] == 'weights' or var.name[-8:-2] == 'Matrix')
        ]

        reg_loss = loss.l2_regularization_loss(reg_var_list, self.weight_decay)
        self.total_loss = self.cls_loss + reg_loss
Example #6
var_lr_mult = {
    var: (deeplab_lr_mult if var.name.startswith('deeplab') else 1.0)
    for var in train_var_list
}
print('Variable learning rate multiplication:')
for var in train_var_list:
    print('\t%s: %f' % (var.name, var_lr_mult[var]))
print('Done.')

################################################################################
# Loss function and accuracy
################################################################################

cls_loss = loss.weighed_logistic_loss(scores, label_batch, pos_loss_mult,
                                      neg_loss_mult)
reg_loss = loss.l2_regularization_loss(reg_var_list, weight_decay)
total_loss = cls_loss + reg_loss

################################################################################
# Solver
################################################################################

global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(start_lr,
                                           global_step,
                                           lr_decay_step,
                                           lr_decay_rate,
                                           staircase=True)
solver = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                    momentum=momentum)
# Compute gradients
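
The snippet is truncated right after the "# Compute gradients" comment. Judging from the parallel examples on this page, the missing tail most likely computes gradients over `train_var_list`, rescales them by `var_lr_mult`, and applies them; a hedged sketch of that continuation:

grads_and_vars = solver.compute_gradients(total_loss, var_list=train_var_list)
# Scale each gradient by its per-variable learning rate multiplier
grads_and_vars = [((g if var_lr_mult[v] == 1 else tf.multiply(var_lr_mult[v], g)), v)
                  for g, v in grads_and_vars]
train_step = solver.apply_gradients(grads_and_vars, global_step=global_step)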
Example #7
    def train_op(self):
        if self.conv5:
            tvars = [
                var for var in tf.trainable_variables()
                if var.op.name.startswith('text_objseg')
                or var.name.startswith('res5') or var.name.startswith('res4')
                or var.name.startswith('res3')
            ]
        else:
            tvars = [
                var for var in tf.trainable_variables()
                if var.op.name.startswith('text_objseg')
            ]

        if self.freeze_bn:
            tvars = [
                var for var in tvars
                if 'beta' not in var.name and 'gamma' not in var.name
            ]
        reg_var_list = [
            var for var in tvars
            if var.op.name.find(r'DW') > 0 or var.name[-9:-2] == 'weights'
        ]
        print('Collecting variables for regularization:')
        for var in reg_var_list:
            print('\t%s' % var.name)
        print('Done.')

        # define loss
        self.target = tf.image.resize_bilinear(self.target_fine,
                                               [self.vf_h, self.vf_w])
        self.cls_loss_c5 = loss.weighed_logistic_loss(self.up_c5,
                                                      self.target_fine, 1, 1)
        self.cls_loss_c4 = loss.weighed_logistic_loss(self.up_c4,
                                                      self.target_fine, 1, 1)
        #         self.cls_loss_c3 = loss.weighed_logistic_loss(self.up_c3, self.target_fine, 1, 1)
        self.cls_loss = loss.weighed_logistic_loss(self.up, self.target_fine,
                                                   1, 1)
        self.cls_loss_all = 0.8 * self.cls_loss + 0.1 * self.cls_loss_c5 \
                            + 0.1 * self.cls_loss_c4
        self.reg_loss = loss.l2_regularization_loss(reg_var_list,
                                                    self.weight_decay)
        self.cost = self.cls_loss_all + self.reg_loss

        # learning rate
        self.train_step = tf.Variable(0, trainable=False)  # global step counter; the training op itself is self.train below
        self.learning_rate = tf.train.polynomial_decay(
            self.start_lr,
            self.train_step,
            self.lr_decay_step,
            end_learning_rate=0.00001,
            power=0.9)

        # optimizer
        if self.optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer(self.learning_rate)
        else:
            raise ValueError("Unknown optimizer type %s!" % self.optimizer)

        # learning rate multiplier
        grads_and_vars = optimizer.compute_gradients(self.cost, var_list=tvars)
        var_lr_mult = {}
        for var in tvars:
            if var.op.name.find(r'biases') > 0:
                var_lr_mult[var] = 2.0
            elif var.name.startswith('res5') or var.name.startswith(
                    'res4') or var.name.startswith('res3'):
                var_lr_mult[var] = 1.0
            else:
                var_lr_mult[var] = 1.0
        print('Variable learning rate multiplication:')
        for var in tvars:
            print('\t%s: %f' % (var.name, var_lr_mult[var]))
        print('Done.')
        grads_and_vars = [
            ((g if var_lr_mult[v] == 1 else tf.multiply(var_lr_mult[v], g)), v)
            for g, v in grads_and_vars
        ]

        # training step
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train = optimizer.apply_gradients(grads_and_vars,
                                                   global_step=self.train_step)

        # Summary in tensorboard
        tf.summary.scalar('loss_all', self.cls_loss_all)
        #         tf.summary.scalar('loss_c3', self.cls_loss_c3)
        tf.summary.scalar('loss_c4', self.cls_loss_c4)
        tf.summary.scalar('loss_c5', self.cls_loss_c5)
        tf.summary.scalar('loss_last', self.cls_loss)
        pred = tf.convert_to_tensor(tf.cast(self.up > 0, tf.int32), tf.int32)
        labl = self.target_fine
        intersect = tf.reduce_sum(tf.cast(
            tf.logical_and(tf.cast(pred, tf.bool), tf.cast(labl, tf.bool)),
            tf.int32),
                                  axis=(1, 2, 3))
        union = tf.reduce_sum(tf.cast(
            tf.logical_or(tf.cast(pred, tf.bool), tf.cast(labl, tf.bool)),
            tf.int32),
                              axis=(1, 2, 3))
        self.mIoU = tf.reduce_mean(tf.divide(intersect, union))
        tf.summary.scalar('mean_IOU', self.mIoU)
        self.merged = tf.summary.merge_all()
Example #8
print('Done.')

# Collect learning rate for trainable variables
var_lr_mult = {var: (vgg_lr_mult if var.name.startswith('vgg_local') else 1.0)
               for var in train_var_list}
print('Variable learning rate multiplication:')
for var in train_var_list:
    print('\t%s: %f' % (var.name, var_lr_mult[var]))
print('Done.')

################################################################################
# Loss function and accuracy
################################################################################

cls_loss = loss.weighed_logistic_loss(scores, label_batch, pos_loss_mult, neg_loss_mult)
reg_loss = loss.l2_regularization_loss(reg_var_list, weight_decay)
total_loss = cls_loss + reg_loss

def compute_accuracy(scores, labels):
    is_pos = (labels != 0)
    is_neg = np.logical_not(is_pos)
    num_all = labels.shape[0]
    num_pos = np.sum(is_pos)
    num_neg = num_all - num_pos

    is_correct = np.logical_xor(scores < 0, is_pos)
    accuracy_all = np.sum(is_correct) / num_all
    accuracy_pos = np.sum(is_correct[is_pos]) / num_pos
    accuracy_neg = np.sum(is_correct[is_neg]) / num_neg
    return accuracy_all, accuracy_pos, accuracy_neg
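
A quick, hypothetical usage of `compute_accuracy` on flattened score/label arrays (the input values below are made up for illustration):

import numpy as np

scores_value = np.array([1.2, -0.3, 0.8, -2.1])   # raw scores (logits)
labels_value = np.array([1, 0, 1, 1])             # binary ground-truth labels
acc_all, acc_pos, acc_neg = compute_accuracy(scores_value, labels_value)
print(acc_all, acc_pos, acc_neg)  # 0.75 0.666... 1.0 (with Python 3 division)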
Example #9
    def train_op(self):
        # Collect variables for training
        tvars = [
            var for var in tf.trainable_variables()
            if var.op.name.startswith('refer_seg')
        ]
        print('Collecting variables for training:')
        for var in tvars:
            print('\t%s' % var.name)
        print('Done.')

        # Collect variables for regularization
        rvars = [
            var for var in tf.trainable_variables()
            if var.op.name.startswith('refer_seg')
        ]
        print('Collecting variables for regularization:')
        for var in rvars:
            print('\t%s' % var.name)
        print('Done.')

        self.target_coarse = tf.image.resize_bilinear(self.target_fine,
                                                      [self.vf_h, self.vf_w])
        self.mask_indices = tf.slice(tf.where(self.target_coarse > 0.5),
                                     [0, 0], [-1, 3])
        #self.masked_score = tf.gather(self.score, self.mask_indices)
        gathered_score = []
        for b in range(self.batch_size):
            mask_indices = tf.where(
                tf.slice(self.target_coarse, [b, 0, 0, 0], [1, -1, -1, -1]) >
                0.5)
            mask_indices = tf.slice(mask_indices, [0, 0], [-1, 3])
            print(mask_indices.get_shape().as_list())
            masked_score = tf.gather_nd(
                tf.slice(self.score, [b, 0, 0, 0], [1, -1, -1, -1]),
                mask_indices)
            print(masked_score.get_shape().as_list())
            gathered_score.append(masked_score)
        self.gathered_score = tf.stack(gathered_score)
        print(self.gathered_score.get_shape().as_list())
        #self.masked_score = tf.gather_nd(self.score, self.mask_indices)
        self.mean_score = tf.reduce_mean(self.gathered_score, axis=1)

        self.spk_loss = tf.reduce_sum(
            tf.squared_difference(self.mean_score, self.encoding))
        self.reg_loss = loss.l2_regularization_loss(rvars, self.weight_decay)
        self.sum_loss = self.spk_loss + self.reg_loss

        # Define learning rate
        self.global_step = tf.Variable(0, trainable=False)
        self.learning_rate = tf.train.polynomial_decay(self.start_lr,
                                                       self.global_step,
                                                       self.lr_decay_step,
                                                       self.end_lr,
                                                       self.lr_decay_rate)

        # Define optimization process
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        grads_and_vars = optimizer.compute_gradients(self.sum_loss,
                                                     var_list=tvars)

        var_lr_mult = {}
        for var in tvars:
            var_lr_mult[var] = 2.0 if var.op.name.find('biases') > 0 else 1.0
        print('Setting variable learning rate multiplication:')
        for var in tvars:
            print('\t%s: %f' % (var.name, var_lr_mult[var]))
        print('Done.')

        grads_and_vars = [
            ((g if var_lr_mult[v] == 1 else tf.multiply(var_lr_mult[v], g)), v)
            for g, v in grads_and_vars
        ]
        self.train_step = optimizer.apply_gradients(
            grads_and_vars, global_step=self.global_step)
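
For reference, the `tf.train.polynomial_decay` calls in Examples #2 and #9 pass `self.lr_decay_rate` positionally into the `power` argument. A NumPy sketch of (roughly) what the non-cycling schedule computes:

import numpy as np

def polynomial_decay(start_lr, global_step, decay_steps, end_lr=0.0001, power=1.0):
    # lr = (start_lr - end_lr) * (1 - step / decay_steps) ** power + end_lr
    step = np.minimum(global_step, decay_steps)
    return (start_lr - end_lr) * (1.0 - step / decay_steps) ** power + end_lr

print(polynomial_decay(1e-3, 0, 10000))      # 0.001 at step 0
print(polynomial_decay(1e-3, 10000, 10000))  # decays to end_lr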