def validate(epoch, val_loader, net, criterion, restore, best):
    # Run a full pass over the validation set; when the loss improves, save a
    # model snapshot and dump colorized predictions to disk.
    net.eval()
    batch_inputs = []
    batch_outputs = []
    batch_labels = []
    for vi, data in enumerate(val_loader, 0):
        inputs, labels = data
        inputs = Variable(inputs, volatile=True).cuda()
        labels = Variable(labels, volatile=True).cuda()

        outputs = net(inputs)

        batch_inputs.append(inputs.cpu())
        batch_outputs.append(outputs.cpu())
        batch_labels.append(labels.cpu())

    batch_inputs = torch.cat(batch_inputs)
    batch_outputs = torch.cat(batch_outputs)
    batch_labels = torch.cat(batch_labels)
    val_loss = criterion(batch_outputs, batch_labels)
    val_loss = val_loss.data[0]

    batch_inputs = batch_inputs.data
    batch_outputs = batch_outputs.data
    batch_labels = batch_labels.data.numpy()
    # Per-pixel prediction: argmax over the class dimension (squeeze_ drops the
    # extra dimension that the old max API keeps).
    batch_prediction = batch_outputs.max(1)[1].squeeze_(1).numpy()

    mean_iu = calculate_mean_iu(batch_prediction, batch_labels, num_classes)

    if val_loss < best[0]:
        best[0] = val_loss
        best[1] = mean_iu
        best[2] = epoch
        torch.save(
            net.state_dict(),
            os.path.join(
                ckpt_path, 'epoch_%d_validation_loss_%.4f_mean_iu_%.4f.pth' %
                (epoch + 1, val_loss, mean_iu)))

        to_save_dir = os.path.join(ckpt_path, str(epoch + 1))
        rmrf_mkdir(to_save_dir)

        # Save the input image, the colorized prediction, and the colorized
        # ground truth for every validation sample.
        for idx, (img, pred, label) in enumerate(
                zip(batch_inputs, batch_prediction, batch_labels)):
            pil_input = restore(img)
            pil_output = Image.fromarray(
                colorize_mask(pred, ignored_label=ignored_label), 'RGB')
            pil_label = Image.fromarray(
                colorize_mask(label, ignored_label=ignored_label), 'RGB')
            pil_input.save(os.path.join(to_save_dir, '%d_img.png' % idx))
            pil_output.save(os.path.join(to_save_dir, '%d_out.png' % idx))
            pil_label.save(os.path.join(to_save_dir, '%d_label.png' % idx))

    print '--------------------------------------------------------'
    print '[validation loss %.4f]' % val_loss
    print '[best validation loss %.4f], [best_mean_iu %.4f], [best epoch %d]' % (
        best[0], best[1], best[2] + 1)
    print '--------------------------------------------------------'

    net.train()
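
Both validate variants call a calculate_mean_iu helper that is not shown in these snippets. The sketch below is an assumed re-implementation (a pixel-level confusion matrix accumulated over all samples, followed by the average per-class IoU); the project's actual helper may treat ignored labels differently.

import numpy as np

def calculate_mean_iu(predictions, labels, num_classes):
    # Assumed behaviour: accumulate a confusion matrix over every
    # (prediction, label) pixel pair, then average the per-class IoU.
    hist = np.zeros((num_classes, num_classes), dtype=np.int64)
    for pred, label in zip(predictions, labels):
        valid = (label >= 0) & (label < num_classes)
        hist += np.bincount(
            num_classes * label[valid].astype(int) + pred[valid],
            minlength=num_classes ** 2).reshape(num_classes, num_classes)
    intersection = np.diag(hist)
    union = hist.sum(1) + hist.sum(0) - intersection
    iu = intersection / np.maximum(union, 1).astype(np.float64)
    return float(iu.mean())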
Example #2
def validate(val_loader, net, criterion, optimizer, epoch, restore):
    # Variant that also logs scalars and an image grid via `writer` and saves
    # the optimizer state alongside the model snapshot.
    net.eval()
    # The loss is computed on CPU over the concatenated batches, so move the
    # criterion there for the duration of validation.
    criterion.cpu()
    input_batches = []
    output_batches = []
    label_batches = []

    for vi, data in enumerate(val_loader, 0):
        inputs, labels = data
        inputs = Variable(inputs, volatile=True).cuda()
        labels = Variable(labels, volatile=True).cuda()

        outputs = net(inputs)

        input_batches.append(inputs.cpu().data)
        output_batches.append(outputs.cpu())
        label_batches.append(labels.cpu())

    input_batches = torch.cat(input_batches)
    output_batches = torch.cat(output_batches)
    label_batches = torch.cat(label_batches)
    val_loss = criterion(output_batches, label_batches)
    val_loss = val_loss.data[0]

    # Keep only the first num_classes - 1 output channels, then take the
    # per-pixel argmax as the prediction.
    output_batches = output_batches.cpu().data[:, :num_classes - 1, :, :]
    label_batches = label_batches.cpu().data.numpy()
    prediction_batches = output_batches.max(1)[1].squeeze_(1).numpy()

    mean_iu = calculate_mean_iu(prediction_batches, label_batches, num_classes)

    writer.add_scalar('loss', val_loss, epoch + 1)
    writer.add_scalar('mean_iu', mean_iu, epoch + 1)

    if val_loss < train_record['best_val_loss']:
        train_record['best_val_loss'] = val_loss
        train_record['corr_epoch'] = epoch + 1
        train_record['corr_mean_iu'] = mean_iu
        snapshot_name = 'epoch_%d_loss_%.4f_mean_iu_%.4f_lr_%.8f' % (
            epoch + 1, val_loss, mean_iu, train_args['new_lr'])
        torch.save(net.state_dict(),
                   os.path.join(ckpt_path, exp_name, snapshot_name + '.pth'))
        torch.save(
            optimizer.state_dict(),
            os.path.join(ckpt_path, exp_name, 'opt_' + snapshot_name + '.pth'))

        with open(exp_name + '.txt', 'a') as f:
            f.write(snapshot_name + '\n')

        to_save_dir = os.path.join(ckpt_path, exp_name, str(epoch + 1))
        rmrf_mkdir(to_save_dir)

        # Randomly sample validation images and build (input, label, prediction)
        # rows for the image grid logged below.
        x = []
        for idx, (img, pred, label) in enumerate(
                zip(input_batches, prediction_batches, label_batches)):
            if random.random() > val_args['img_sample_rate']:
                continue
            pil_input = restore(img)
            pil_output = colorize_mask(pred)
            pil_label = colorize_mask(label)
            pil_input.save(os.path.join(to_save_dir, '%d_img.png' % idx))
            pil_output.save(os.path.join(to_save_dir, '%d_out.png' % idx))
            pil_label.save(os.path.join(to_save_dir, '%d_label.png' % idx))
            x.extend([
                pil_to_tensor(pil_input.convert('RGB')),
                pil_to_tensor(pil_label.convert('RGB')),
                pil_to_tensor(pil_output.convert('RGB'))
            ])
        x = torch.stack(x, 0)
        x = vutils.make_grid(x, nrow=3, padding=5)
        writer.add_image(snapshot_name, x)

    print '--------------------------------------------------------'
    print '[val loss %.4f], [mean iu %.4f]' % (val_loss, mean_iu)
    print '[best val loss %.4f], [mean iu %.4f], [epoch %d]' % (
        train_record['best_val_loss'], train_record['corr_mean_iu'],
        train_record['corr_epoch'])
    print '--------------------------------------------------------'

    net.train()
    criterion.cuda()
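
The restore argument passed to both functions is a transform that turns a normalized input tensor back into a PIL image before it is saved. It is defined elsewhere in the project; the sketch below shows one plausible construction, assuming channel-wise mean/std normalization was applied at load time. The DeNormalize class and the ImageNet statistics here are assumptions, not the project's actual values.

import torchvision.transforms as standard_transforms

class DeNormalize(object):
    # Undo channel-wise (x - mean) / std normalization, in place, on a CHW tensor.
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        for t, m, s in zip(tensor, self.mean, self.std):
            t.mul_(s).add_(m)
        return tensor

# Hypothetical statistics -- substitute whatever the training transforms used.
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
restore = standard_transforms.Compose([
    DeNormalize(*mean_std),
    standard_transforms.ToPILImage(),
])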
Example #3
    31: 16,
    32: 17,
    33: 18
}

raw_train_img = os.path.join(raw_img_path, 'train')
raw_train_mask = os.path.join(raw_mask_path, 'train')
raw_val_img = os.path.join(raw_img_path, 'val')
raw_val_mask = os.path.join(raw_mask_path, 'val')

processed_train_img = os.path.join(processed_train_path, 'img')
processed_train_mask = os.path.join(processed_train_path, 'mask')
processed_val_img = os.path.join(processed_val_path, 'img')
processed_val_mask = os.path.join(processed_val_path, 'mask')

rmrf_mkdir(processed_path)
rmrf_mkdir(processed_train_path)
rmrf_mkdir(processed_train_img)
rmrf_mkdir(processed_train_mask)
rmrf_mkdir(processed_val_path)
rmrf_mkdir(processed_val_img)
rmrf_mkdir(processed_val_mask)

# Flatten the per-category subfolders into a single directory: copy every
# image, and skip any mask that is not a *labelIds.png annotation.
for d in os.listdir(raw_train_img):
    cate_img_dir = os.path.join(raw_train_img, d)
    cate_mask_dir = os.path.join(raw_train_mask, d)
    for img_name in os.listdir(cate_img_dir):
        shutil.copy(os.path.join(cate_img_dir, img_name), processed_train_img)
    for mask_name in os.listdir(cate_mask_dir):
        if not mask_name.endswith('labelIds.png'):
            continue
Example #4
import os

import numpy as np

from utils.io import rmrf_mkdir
from .config import *

val_percentage = 0.02

img_list = [os.path.splitext(img)[0] for img in os.listdir(raw_mask_path)]
img_list = np.random.permutation(img_list)

val_data_num = int(len(img_list) * val_percentage)
val_data = img_list[:val_data_num]
train_data = img_list[val_data_num:]

rmrf_mkdir(processed_train_path)
rmrf_mkdir(processed_val_path)

for i, t in enumerate(train_data):
    os.symlink(os.path.join(voc_image_dir_path, t + '.jpg'),
               os.path.join(train_path, image_dir_name, t + '.jpg'))
    os.symlink(os.path.join(voc_mask_dir_path, t + '.png'),
               os.path.join(train_path, mask_dir_name, t + '.png'))
    print 'processed %d train samples' % i

for i, v in enumerate(val_data):
    os.symlink(os.path.join(voc_image_dir_path, v + '.jpg'),
               os.path.join(val_path, image_dir_name, v + '.jpg'))
    os.symlink(os.path.join(voc_mask_dir_path, v + '.png'),
               os.path.join(val_path, mask_dir_name, v + '.png'))
    print 'processed %d val samples' % i
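
rmrf_mkdir from utils.io is used throughout these scripts to obtain a fresh, empty directory. A minimal sketch of the assumed behaviour (remove the tree if it exists, then recreate it) is given below; the project's actual helper may differ in details such as using os.makedirs.

import os
import shutil

def rmrf_mkdir(dir_name):
    # Assumed behaviour of the utils.io helper: `rm -rf dir` followed by `mkdir dir`.
    if os.path.exists(dir_name):
        shutil.rmtree(dir_name)
    os.mkdir(dir_name)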
import os

import numpy as np

from configuration import voc_image_dir_path, voc_mask_dir_path, train_path, val_path, image_dir_name, mask_dir_name
from utils.io import rmrf_mkdir

val_percentage = 0.02

img_list = [os.path.splitext(img)[0] for img in os.listdir(voc_mask_dir_path)]
img_list = np.random.permutation(img_list)

val_data_num = int(len(img_list) * val_percentage)
val_data = img_list[:val_data_num]
train_data = img_list[val_data_num:]

rmrf_mkdir(train_path)
rmrf_mkdir(os.path.join(train_path, image_dir_name))
rmrf_mkdir(os.path.join(train_path, mask_dir_name))
rmrf_mkdir(val_path)
rmrf_mkdir(os.path.join(val_path, image_dir_name))
rmrf_mkdir(os.path.join(val_path, mask_dir_name))

for i, t in enumerate(train_data):
    os.symlink(os.path.join(voc_image_dir_path, t + '.jpg'),
               os.path.join(train_path, image_dir_name, t + '.jpg'))
    os.symlink(os.path.join(voc_mask_dir_path, t + '.png'),
               os.path.join(train_path, mask_dir_name, t + '.png'))
    print 'processed %d train samples' % i

for i, v in enumerate(val_data):
    os.symlink(os.path.join(voc_image_dir_path, v + '.jpg'),