コード例 #1
0
def trainlayer(imgpatch, gt, sess):
    """Build the fine-tune-layer graph: forward pass, combined L2 +
    perceptual loss, and TensorBoard summaries.

    Returns (loss, output, gt).
    """
    # Forward pass through the fine-tune layer network.
    output = ns.netftlayer(imgpatch)

    # Pixel-wise L2 loss between prediction and ground truth.
    loss_l2 = tf.reduce_mean(tf.square(output - gt))

    # Perceptual (VGG) loss: replicate the single grey channel three
    # times so the tensors have a 3-channel shape for the VGG network.
    out_rgb = tf.concat([output] * 3, axis=3)
    gt_rgb = tf.concat([gt] * 3, axis=3)
    vgg_losses = cal_loss(out_rgb, gt_rgb, config.model.loss_vgg, sess)
    loss_f = vgg_losses.loss_f / 3

    # Weighted total: 60% pixel loss, 40% perceptual loss.
    loss = loss_l2 * 0.6 + loss_f * 0.4

    # TensorBoard summaries for the individual terms and image triplets.
    tf.summary.scalar('loss/loss_l2', loss_l2 * 0.6)
    tf.summary.scalar('loss/loss_f', loss_f * 0.4)
    tf.summary.scalar('loss/total_loss', loss)
    tf.summary.image('input', imgpatch, max_outputs=12)
    tf.summary.image('output', output, max_outputs=12)
    tf.summary.image('ground_truth', gt, max_outputs=12)

    return loss, output, gt
コード例 #2
0
def test(args, model, device, test_loader, num_classes, text=False):
    """Evaluate *model* on *test_loader*.

    Returns (accuracy_percent, average_loss). For text data, batches
    whose size differs from ``args.batch_size`` are skipped.
    (Fix: removed the dead ``acc = []`` local that was immediately
    shadowed by a float later.)
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for batch in test_loader:
            if text:
                data = batch.text[0]
                target = torch.autograd.Variable(batch.label).long()
                # Skip the ragged final batch.
                if data.size()[0] != args.batch_size:
                    continue
            else:
                data, target, index = batch

            data, target = data.to(device), target.to(device)
            output = model(data)
            # Sum (not mean) per batch so we can divide by the dataset
            # size once at the end.
            loss = cal_loss(output, target, reduction='sum')
            test_loss += loss.item()
            # Prediction = argmax over the first num_classes logits
            # (any extra columns are ignored).
            pred = output[:, :num_classes].argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    acc = 100. * correct / len(test_loader.dataset)
    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset), acc))
    return acc, test_loss
コード例 #3
0
def train(args,
          model,
          device,
          train_loader,
          optimizer,
          epoch,
          num_classes=10,
          use_method=True,
          text=False):
    """Run one training epoch and return the mean per-batch loss.

    When ``use_method`` is True, ``args.method`` selects the loss
    variant; otherwise plain 'nll' is used. (Fix: the two branches
    duplicated the whole ``cal_loss`` call differing only in
    ``method=``; the method is now chosen once.)
    """
    model.train()
    loss_a = []

    for batch_idx, batch in enumerate(train_loader):
        if text:
            data = batch.text[0]
            target = torch.autograd.Variable(batch.label).long()
            # Skip the ragged final batch.
            if data.size()[0] != args.batch_size:
                continue
        else:
            data, target, index = batch
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)

        # Only the loss method differs between the two modes.
        method = args.method if use_method else 'nll'
        loss = cal_loss(output,
                        target,
                        method=method,
                        reduction='mean',
                        p=args.smoothing)

        loss_a.append(loss.item())
        loss.backward()
        optimizer.step()

        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    return np.mean(loss_a)
def loss(gt_gray, output):
    """Combined L1 + weight-regularization + VGG perceptual loss.

    Returns a scalar tensor:
    0.5 * perceptual + 0.5 * L1 + 0.2 * weight regularization.
    (Fix: removed the no-op ``global gt, out`` statement and a large
    block of commented-out debug/plotting code; renamed the local that
    shadowed the function name ``loss``.)
    """
    # Pixel-wise L1 loss.
    loss_l1 = tf.reduce_mean(tf.abs(output - gt_gray))

    # Mean absolute weight value over all non-VGG trainable variables
    # (named "l2" in the original weighting convention despite using abs).
    loss_l1_reg = 0
    weight_size = 0
    for variable in tf.trainable_variables():
        if not variable.name.startswith('vgg_16'):
            loss_l1_reg += tf.reduce_sum(tf.cast(tf.abs(variable),
                                                 tf.float32)) * 2
            weight_size += tf.size(variable)
    loss_l2_reg = loss_l1_reg / tf.to_float(weight_size)

    # Perceptual loss: replicate the grey channel to 3 channels for VGG.
    output_3_channels = tf.concat([output, output, output], axis=3)
    gt_gray_3_channels = tf.concat([gt_gray, gt_gray, gt_gray], axis=3)
    losses = cal_loss(output_3_channels, gt_gray_3_channels,
                      '../loss/pretrained/vgg16.npy')
    loss_vgg = losses.loss / 3

    return loss_vgg * 0.5 + loss_l1 * 0.5 + loss_l2_reg * 0.2
def trainlayer(tfrecord_path, sess):
    """Build the high-frequency-layer training graph from a tfrecord.

    Combines an L1 pixel loss, a weight-regularization term and a
    perceptual (VGG) loss, and registers TensorBoard summaries.

    Returns (loss, output, gt_gray).
    """
    # Pull the next (input, ground-truth) pair from the dataset iterator.
    train_iter = data_iterator_new_high(tfrecord_path)
    img_gray, gt_gray = train_iter.get_next()

    # Forward pass.
    output = ns.nethighlayer(img_gray)

    # Pixel-wise L1 loss.
    loss_l1 = tf.reduce_mean(tf.abs(output - gt_gray))

    # Mean-absolute-weight regularization over non-loss-model variables.
    reg_sum = 0
    n_weights = 0
    for var in tf.trainable_variables():
        if not var.name.startswith(config.model.loss_model):
            reg_sum += tf.reduce_sum(tf.abs(var)) * 2
            n_weights += tf.size(var)
    loss_l2_reg = reg_sum / tf.to_float(n_weights)

    # Perceptual (VGG) loss on channel-replicated tensors.
    out_rgb = tf.concat([output] * 3, axis=3)
    gt_rgb = tf.concat([gt_gray] * 3, axis=3)
    loss_f = cal_loss(out_rgb, gt_rgb, config.model.loss_vgg, sess).loss_f / 3

    loss = loss_f * 0.5 + loss_l1 * 0.5 + loss_l2_reg * 0.2

    # TensorBoard summaries.
    tf.summary.scalar('loss/loss_l1', loss_l1 * 0.5)
    tf.summary.scalar('loss/loss_l2_reg', loss_l2_reg * 0.2)
    tf.summary.scalar('loss/loss_f', loss_f * 0.5)
    tf.summary.scalar('loss/total_loss', loss)
    tf.summary.image('input', img_gray, max_outputs=12)
    tf.summary.image('output', output, max_outputs=12)
    tf.summary.image('ground_truth', gt_gray, max_outputs=12)

    return loss, output, gt_gray
def trainlayer(output, inputimg, gt, sess):
    """Attach L2 + perceptual + weight-regularization losses to a
    precomputed network *output* and register TensorBoard summaries.

    Returns (loss, output, inputimg, gt) with input/output images
    normalized for display.
    """
    # Pixel-wise L2 loss.
    loss_l2 = tf.reduce_mean(tf.square(output - gt))

    # Perceptual (VGG) loss on channel-replicated tensors.
    out_rgb = tf.concat([output] * 3, axis=3)
    gt_rgb = tf.concat([gt] * 3, axis=3)
    loss_f = cal_loss(out_rgb, gt_rgb,
                      config.model.loss_vgg, sess).loss_f / 3

    # Mean-absolute-weight regularization over non-loss-model variables.
    n_weights = 0
    reg_sum = 0
    for var in tf.trainable_variables():
        if not var.name.startswith(config.model.loss_model):
            reg_sum += tf.reduce_sum(tf.abs(var)) * 2
            n_weights += tf.size(var)
    loss_l2_reg = reg_sum / tf.to_float(n_weights)

    # Weighted total.
    loss = loss_l2 * 0.6 + loss_f * 0.4 + loss_l2_reg * 0.2

    # Normalize images to the displayable 0-255 range for summaries.
    inputimg = tensor_norm_0_to_255(inputimg)
    output = tensor_norm_0_to_255(output)

    tf.summary.scalar('loss/loss_l2', loss_l2 * 0.6)
    tf.summary.scalar('loss/loss_f', loss_f * 0.4)
    tf.summary.scalar('loss/loss_l2_reg', loss_l2_reg * 0.2)
    tf.summary.scalar('loss/total_loss', loss)
    tf.summary.image('input', inputimg, max_outputs=12)
    tf.summary.image('output', output, max_outputs=12)
    tf.summary.image('ground_truth', gt, max_outputs=12)

    return loss, output, inputimg, gt
コード例 #7
0
def loss(gt_gray, output):
    """Return the combined L1 + weight-regularization + VGG perceptual
    loss for grayscale prediction *output* against *gt_gray*."""
    # Pixel-wise L1 loss.
    loss_l1 = tf.reduce_mean(tf.abs(output - gt_gray))

    # Mean absolute value of all non-VGG trainable weights.
    reg_sum = 0
    n_weights = 0
    for var in tf.trainable_variables():
        if not var.name.startswith('vgg_16'):
            reg_sum += tf.reduce_sum(tf.cast(tf.abs(var), tf.float32)) * 2
            n_weights += tf.size(var)
    loss_l2_reg = reg_sum / tf.to_float(n_weights)

    # Perceptual loss: replicate the grey channel to 3 channels for VGG.
    out_rgb = tf.concat([output] * 3, axis=3)
    gt_rgb = tf.concat([gt_gray] * 3, axis=3)
    perc = cal_loss(out_rgb, gt_rgb, '../loss/pretrained/vgg16.npy')
    loss_vgg = perc.loss / 3

    return loss_vgg * 0.5 + loss_l1 * 0.5 + loss_l2_reg * 0.2
コード例 #8
0
def train(args,
          model,
          device,
          train_loader,
          optimizer,
          epoch,
          num_classes=10,
          use_gamblers=True,
          text=False):
    """Run one training epoch with a label-smoothed loss.

    NOTE(review): ``num_classes`` and ``use_gamblers`` are currently
    unused — the gambler's-loss lambda variants existed only as
    commented-out dead code, which has been removed. The parameters are
    kept in the signature for caller compatibility.

    Returns the mean per-batch loss over the epoch.
    """
    model.train()
    loss_a = []

    for batch_idx, batch in enumerate(train_loader):
        if text:
            data = batch.text[0]
            target = torch.autograd.Variable(batch.label).long()
            # Skip the ragged final batch.
            if data.size()[0] != args.batch_size:
                continue
        else:
            data, target, index = batch
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)

        # Label-smoothed loss; eps is the residual smoothing mass.
        loss = cal_loss(output,
                        target,
                        eps=1.0 - args.smoothing,
                        smoothing=True)

        loss_a.append(loss.item())
        loss.backward()
        optimizer.step()

        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    return np.mean(loss_a)
def trainlayer(tfrecord_path, sess):
    """Build the bottom-layer (grayscale) training graph with random
    flip/rotation augmentation, reflection padding, and a combined
    L1 + perceptual + weight-regularization loss.

    Returns (loss, img_ready, output, gt_ready).
    """
    # Read the next (image, ground-truth) batch from the tfrecord.
    train_iter = data_iterator_new_gray_bot(tfrecord_path)
    img, gt = train_iter.get_next()

    # ---- Data augmentation ----
    # geo_trans[0]: flip left/right, geo_trans[1]: flip up/down,
    # geo_trans[2]: number of 90-degree rotations.
    geo_trans = tf.random_uniform([3], 0, 1.0, dtype=tf.float32)

    # Apply identical flips to the image and its ground truth.
    img_lr = tf.cond(tf.less(geo_trans[0], 0.5), lambda: tf.image.flip_left_right(img), lambda: img)
    gt_lr = tf.cond(tf.less(geo_trans[0], 0.5), lambda: tf.image.flip_left_right(gt), lambda: gt)

    img_ud = tf.cond(tf.less(geo_trans[1], 0.5), lambda: tf.image.flip_up_down(img_lr), lambda: img_lr)
    gt_ud = tf.cond(tf.less(geo_trans[1], 0.5), lambda: tf.image.flip_up_down(gt_lr), lambda: gt_lr)

    # Rotate by d * 90 degrees (d is a rounded uniform draw).
    d = tf.cast(geo_trans[2] * 4 + 0.5, tf.int32)
    img_ready = tf.image.rot90(img_ud, d)
    gt_ready = tf.image.rot90(gt_ud, d)

    # Reflection-pad the input to suppress the ripple-border effect.
    pad_width = 5
    paddings = tf.constant([[0, 0], [pad_width, pad_width], [pad_width, pad_width], [0, 0]])
    img_pad = tf.pad(img_ready, paddings, "REFLECT")

    # Forward pass through the bottom-layer network.
    output = ns.netbotlayer_gray_lev_3(img_pad)

    # Slice the padding back off the network output.
    shape = tf.shape(output)
    output = tf.slice(output, [0, pad_width, pad_width, 0],
                      [-1, shape[1] - pad_width * 2, shape[2] - pad_width * 2, -1])

    # ---- Losses ----
    loss_l1 = tf.reduce_mean(tf.abs(output - gt_ready))

    # Weight regularization over non-loss-model variables.
    # FIX: the original applied tf.sqrt INSIDE the loop, nesting one
    # sqrt per trainable variable; the sqrt is now taken once over the
    # accumulated sum.
    loss_l2_reg = 0
    for variable in tf.trainable_variables():
        if not variable.name.startswith(config.model.loss_model):
            loss_l2_reg += tf.reduce_mean(tf.abs(variable)) * 2
    loss_l2_reg = tf.sqrt(loss_l2_reg)

    # Perceptual (VGG) loss on channel-replicated tensors.
    output_3_channels = tf.concat([output, output, output], axis=3)
    gt_gray_3_channels = tf.concat([gt_ready, gt_ready, gt_ready], axis=3)
    losses = cal_loss(output_3_channels, gt_gray_3_channels, config.model.loss_vgg, sess)
    loss_f = losses.loss_f / 3

    loss = loss_f * 0.5 + loss_l1 + loss_l2_reg

    # ---- TensorBoard summaries ----
    tf.summary.scalar('loss/loss_l1', loss_l1)
    tf.summary.scalar('loss/loss_l2_reg', loss_l2_reg)
    tf.summary.scalar('loss/loss_f', loss_f * 0.5)
    tf.summary.scalar('loss/total_loss', loss)
    tf.summary.image('input', img_ready, max_outputs=12)
    tf.summary.image('output', output, max_outputs=12)
    tf.summary.image('ground_truth', gt_ready, max_outputs=12)

    return loss, img_ready, output, gt_ready
コード例 #10
0
    def train(self, n_epoch, opt):
        """Train for up to *n_epoch* epochs or ``opt.max_steps`` batches.

        Accumulates gradients over ``opt.accumulation_steps`` batches,
        scales the loss via ``amp.scale_loss``, validates every
        ``opt.check_interval`` steps (logging to ``opt.log``), and saves
        checkpoints under ``opt.save_model`` near the end of training.
        """
        # Running counters for the whole training run.
        steps = 0
        train_loss = 0
        start_time = time.time()
        with open(opt.log, "a") as f:
            f.write("\n-----training-----\n")

        for epoch in range(n_epoch):
            self.model.train()
            # Reshuffle batch order each epoch.
            random.shuffle(self.train_batch_sampler)
            train_data_loader = DataLoader(
                self.train_data_set,
                batch_sampler=self.train_batch_sampler,
                collate_fn=self.train_data_set.collater)

            for iters in train_data_loader:

                src = iters[0].to(self.device)
                trg = iters[1].to(self.device)

                # Shifted target: the decoder input drops the last token,
                # the labels (ys below) drop the first token.
                trg_input = trg[:, :-1]
                src_mask, trg_mask = create_masks(src, trg_input, self.device,
                                                  self.pad_idx)

                preds = self.model(src,
                                   trg_input,
                                   src_mask,
                                   trg_mask,
                                   train=True)
                preds = preds.view(-1, preds.size(-1))
                ys = trg[:, 1:].contiguous().view(-1)

                # Smoothing-enabled loss, scaled for gradient accumulation.
                loss = cal_loss(preds, ys, self.pad_idx, smoothing=True)
                loss = loss / opt.accumulation_steps
                with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                    scaled_loss.backward()
                train_loss += loss.item()

                # Step optimizer/scheduler only every accumulation_steps
                # batches; gradients accumulate in between.
                if (steps + 1) % opt.accumulation_steps == 0:
                    self.optimizer.step()
                    self.lr_scheduler.step()
                    self.optimizer.zero_grad()

                steps += 1
                # Periodic validation and logging.
                if steps % opt.check_interval == 0:
                    torch.cuda.empty_cache()
                    valid_bleu = self._valid_epoch()
                    torch.cuda.empty_cache()

                    train_loss = train_loss / opt.check_interval
                    num_save = steps // opt.check_interval
                    end_time = time.time()
                    with open(opt.log, "a") as f:
                        f.write("[Num Epoch %d] [Num Save %d] [Train Loss %.5f] [Valid BLEU %.3f] [TIME %.3f]\n" \
                                % (epoch+1, num_save, train_loss, valid_bleu*100, end_time - start_time))

                    # Save checkpoints only during the last few intervals.
                    if num_save >= opt.max_steps // opt.check_interval - 5:
                        # Persist current weights.
                        torch.save(
                            self.model.state_dict(),
                            opt.save_model + "/n_" + str(num_save) + ".model")

                    self.model.train()
                    start_time = time.time()
                    train_loss = 0

                # Stop once the step budget is exhausted.
                if steps == opt.max_steps:
                    return
コード例 #11
0
def trainlayer(tfrecord_path, sess):
    """Build the high-frequency grayscale training graph with random
    flip/rotation augmentation and a combined L1 + perceptual +
    weight-regularization loss.

    Returns (loss, img_ready, output, gt_ready).
    """
    # Read the next (image, ground-truth) batch from the tfrecord.
    train_iter = data_iterator_new_gray_high(tfrecord_path)
    img, gt = train_iter.get_next()

    # ---- Data augmentation ----
    # geo_trans[0]: flip left/right, geo_trans[1]: flip up/down,
    # geo_trans[2]: number of 90-degree rotations.
    geo_trans = tf.random_uniform([3], 0, 1.0, dtype=tf.float32)

    # Apply identical flips to the image and its ground truth.
    img_lr = tf.cond(tf.less(geo_trans[0], 0.5),
                     lambda: tf.image.flip_left_right(img), lambda: img)
    gt_lr = tf.cond(tf.less(geo_trans[0], 0.5),
                    lambda: tf.image.flip_left_right(gt), lambda: gt)

    img_ud = tf.cond(tf.less(geo_trans[1], 0.5),
                     lambda: tf.image.flip_up_down(img_lr), lambda: img_lr)
    gt_ud = tf.cond(tf.less(geo_trans[1], 0.5),
                    lambda: tf.image.flip_up_down(gt_lr), lambda: gt_lr)

    # Rotate by d * 90 degrees (d is a rounded uniform draw).
    d = tf.cast(geo_trans[2] * 4 + 0.5, tf.int32)
    img_ready = tf.image.rot90(img_ud, d)
    gt_ready = tf.image.rot90(gt_ud, d)

    # Forward pass through the high-frequency network.
    output = ns.nethighlayer_gray(img_ready)

    # ---- Losses ----
    loss_l1 = tf.reduce_mean(tf.abs(output - gt_ready))

    # Weight regularization over non-loss-model variables.
    # FIX: the original applied tf.sqrt INSIDE the loop, nesting one
    # sqrt per trainable variable; the sqrt is now taken once over the
    # accumulated sum. Also removed a large dead (string-quoted) debug
    # block that restored checkpoints and plotted batches.
    loss_l2_reg = 0
    for variable in tf.trainable_variables():
        if not variable.name.startswith(config.model.loss_model):
            loss_l2_reg += tf.reduce_mean(tf.abs(variable)) * 2
    loss_l2_reg = tf.sqrt(loss_l2_reg)

    # Perceptual (VGG) loss on channel-replicated tensors.
    output_3_channels = tf.concat([output, output, output], axis=3)
    gt_gray_3_channels = tf.concat([gt_ready, gt_ready, gt_ready], axis=3)
    losses = cal_loss(output_3_channels, gt_gray_3_channels,
                      config.model.loss_vgg, sess)
    loss_f = losses.loss_f / 3

    loss = loss_f * 0.5 + loss_l1 + loss_l2_reg

    # ---- TensorBoard summaries ----
    tf.summary.scalar('loss/loss_l1', loss_l1)
    tf.summary.scalar('loss/loss_l2_reg', loss_l2_reg)
    tf.summary.scalar('loss/loss_f', loss_f * 0.5)
    tf.summary.scalar('loss/total_loss', loss)
    tf.summary.image('input', img_ready, max_outputs=12)
    tf.summary.image('output', output, max_outputs=12)
    tf.summary.image('ground_truth', gt_ready, max_outputs=12)

    return loss, img_ready, output, gt_ready