Example 1
import argparse

import chainer
import fcn
import numpy as np
import tqdm
from chainer import cuda

from models import FCN8s  # project-local GAIN model (module path assumed)


def evaluate():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--file',
                        type=str,
                        help='model file path',
                        required=True)

    args = parser.parse_args()
    file = args.file
    print("evaluating: ", file)
    dataset = fcn.datasets.VOC2011ClassSeg('seg11valid')
    n_class = len(dataset.class_names)

    model = FCN8s()
    chainer.serializers.load_npz(file, model)

    # model = fcn.models.FCN8s()
    # model_file = fcn.models.FCN8s.download()
    # chainer.serializers.load_npz(model_file, model)

    gpu = 0  # hard-coded GPU id; not exposed via argparse

    if gpu >= 0:
        cuda.get_device(gpu).use()
        model.to_gpu()

    lbl_preds, lbl_trues = [], []
    for i in tqdm.trange(min(len(dataset), 5)):  # only the first 5 samples
        datum, lbl_true = fcn.datasets.transform_lsvrc2012_vgg16(
            dataset.get_example(i))
        x_data = np.expand_dims(datum, axis=0)
        if gpu >= 0:
            x_data = cuda.to_gpu(x_data)

        with chainer.no_backprop_mode():
            x = chainer.Variable(x_data)
            with chainer.using_config('train', False):
                model(x)
                lbl_pred = chainer.functions.argmax(model.score, axis=1)[0]
                lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
                print(np.unique(lbl_pred))
        # lbl_pred_np = cp.asnumpy(lbl_pred)
        # print(lbl_pred_np.shape)
        # print(np.max(lbl_pred_np))
        # cv2.imshow('img', lbl_pred_np.astype(cv2.CV_8U))
        # cv2.waitKey(0)
        lbl_preds.append(lbl_pred)
        lbl_trues.append(lbl_true)

    acc, acc_cls, mean_iu, fwavacc = fcn.utils.label_accuracy_score(
        lbl_trues, lbl_preds, n_class)
    print('Accuracy: %.4f' % (100 * acc))
    print('AccClass: %.4f' % (100 * acc_cls))
    print('Mean IoU: %.4f' % (100 * mean_iu))
    print('Fwav Acc: %.4f' % (100 * fwavacc))
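
For reference, fcn.utils.label_accuracy_score accumulates a confusion matrix over all (true, predicted) label maps and derives the four metrics printed above. A minimal sketch of that standard computation (function name hypothetical):

import numpy as np

def label_accuracy_score_sketch(label_trues, label_preds, n_class):
    # n_class x n_class confusion matrix accumulated over all images.
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        mask = (lt >= 0) & (lt < n_class)  # drop ignore/out-of-range labels
        hist += np.bincount(
            n_class * lt[mask].astype(int) + lp[mask],
            minlength=n_class ** 2).reshape(n_class, n_class)
    acc = np.diag(hist).sum() / hist.sum()                  # overall pixel accuracy
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))  # mean per-class accuracy
    iu = np.diag(hist) / (
        hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)                                # mean IoU
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()         # frequency-weighted IoU
    return acc, acc_cls, mean_iu, fwavacc
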
Example 2

import argparse
import os

import chainer
from chainer.iterators import SerialIterator
from chainer.optimizers import Adam
from chainer.training import Trainer, extensions
from chainercv.datasets import VOCSemanticSegmentationDataset

from models import FCN8s  # project-local GAIN model (module path assumed)
from updaters import VOC_GAIN_Updater2  # project-local (module path assumed)


def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--gpu', type=int, default=0, help='gpu id')
    parser.add_argument('--modelfile', help='pretrained model file of FCN8')
    parser.add_argument('--lr',
                        type=float,
                        default=1e-7,
                        help='init learning rate')
    parser.add_argument('--name',
                        type=str,
                        default='exp',
                        help='name of the experiment')
    parser.add_argument('--resume',
                        type=int,
                        default=0,
                        help='resume training or not')
    parser.add_argument('--snapshot',
                        type=str,
                        help='snapshot file to resume from')
    parser.add_argument('--lambda1',
                        default=5,
                        type=float,
                        help='lambda1 param')
    parser.add_argument('--lambda2',
                        default=1,
                        type=float,
                        help='lambda2 param')
    parser.add_argument('--lambda3',
                        default=1.5,
                        type=float,
                        help='lambda3 param')

    args = parser.parse_args()

    resume = args.resume
    device = args.gpu

    if resume:
        load_snapshot_path = args.snapshot
        load_model_path = args.modelfile
    else:
        pretrained_model_path = args.modelfile

    experiment = args.name
    lr = args.lr
    optim = Adam
    training_interval = (20000, 'iteration')
    snapshot_interval = (1000, 'iteration')
    lambd1 = args.lambda1
    lambd2 = args.lambda2
    lambd3 = args.lambda3
    updtr = VOC_GAIN_Updater2

    os.makedirs('result/' + experiment, exist_ok=True)
    with open('result/' + experiment + '/details.txt', 'w+') as f:
        f.write("lr - " + str(lr) + "\n")
        f.write("optimizer - " + str(optim) + "\n")
        f.write("lambd1 - " + str(lambd1) + "\n")
        f.write("lambd2 - " + str(lambd2) + "\n")
        f.write("lambd3 - " + str(lambd3) + "\n")
        f.write("training_interval - " + str(training_interval) + "\n")
        f.write("Updater - " + str(updtr) + "\n")

    if resume:
        model = FCN8s()
        chainer.serializers.load_npz(load_model_path, model)
    else:
        model = FCN8s()
        chainer.serializers.load_npz(pretrained_model_path, model)

    if device >= 0:
        model.to_gpu(device)
    dataset = VOCSemanticSegmentationDataset()
    iterator = SerialIterator(dataset, 1, shuffle=False)

    optimizer = Adam(alpha=lr)
    optimizer.setup(model)

    # NOTE: lambd3 is written to details.txt and printed below, but it is
    # not passed to the updater here.
    updater = updtr(iterator,
                    optimizer,
                    device=device,
                    lambd1=lambd1,
                    lambd2=lambd2)
    trainer = Trainer(updater, training_interval)
    log_keys = [
        'epoch', 'iteration', 'main/AM_Loss', 'main/CL_Loss', 'main/TotalLoss'
    ]
    trainer.extend(
        extensions.LogReport(log_keys, (10, 'iteration'),
                             log_name='log' + experiment))
    trainer.extend(extensions.PrintReport(log_keys),
                   trigger=(100, 'iteration'))
    trainer.extend(
        extensions.ProgressBar(training_length=training_interval,
                               update_interval=100))
    trainer.extend(extensions.snapshot(filename='snapshot' + experiment),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        trainer.updater._optimizers['main'].target, "model" + experiment),
                   trigger=snapshot_interval)
    trainer.extend(
        extensions.PlotReport(['main/AM_Loss'],
                              'iteration', (20, 'iteration'),
                              file_name=experiment + '/am_loss.png',
                              grid=True,
                              marker=" "))
    trainer.extend(
        extensions.PlotReport(['main/CL_Loss'],
                              'iteration', (20, 'iteration'),
                              file_name=experiment + '/cl_loss.png',
                              grid=True,
                              marker=" "))
    trainer.extend(
        extensions.PlotReport(['main/SG_Loss'],
                              'iteration', (20, 'iteration'),
                              file_name=experiment + '/sg_loss.png',
                              grid=True,
                              marker=" "))
    trainer.extend(
        extensions.PlotReport(['main/TotalLoss'],
                              'iteration', (20, 'iteration'),
                              file_name=experiment + '/total_loss.png',
                              grid=True,
                              marker=" "))
    trainer.extend(
        extensions.PlotReport(log_keys[2:],
                              'iteration', (20, 'iteration'),
                              file_name=experiment + '/all_loss.png',
                              grid=True,
                              marker=" "))

    if resume:
        chainer.serializers.load_npz(load_snapshot_path, trainer)
    print("Running - - ", experiment)
    print('initial lr ', lr)
    print('optimizer ', optim)
    print('lambd1 ', lambd1)
    print('lambd2 ', lambd2)
    print('lambd3', lambd3)
    trainer.run()
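
VOC_GAIN_Updater2 is project-specific, but the reported keys (main/AM_Loss, main/CL_Loss, main/TotalLoss) imply the usual GAIN recipe: a custom updater that combines a classification-stream loss with an attention-mining loss, weighted by the lambda parameters. A minimal sketch, with the model API and the exact loss terms assumed:

import chainer.functions as F
from chainer import report, training

class GAINUpdaterSketch(training.StandardUpdater):
    # Hypothetical stand-in for VOC_GAIN_Updater2.
    def __init__(self, iterator, optimizer, device=-1, lambd1=1.0, lambd2=1.0):
        super(GAINUpdaterSketch, self).__init__(
            iterator, optimizer, device=device)
        self.lambd1 = lambd1
        self.lambd2 = lambd2

    def update_core(self):
        batch = self.get_iterator('main').next()
        x, t = self.converter(batch, self.device)
        optimizer = self.get_optimizer('main')
        model = optimizer.target
        # Assumed model API: class scores for the full image and for the
        # attention-masked image (the GAIN "mining" branch).
        cl_loss = F.softmax_cross_entropy(model.classify(x), t)
        am_loss = F.softmax_cross_entropy(model.classify_masked(x), t)
        total_loss = self.lambd1 * cl_loss + self.lambd2 * am_loss
        # Reporting under these keys is what feeds LogReport/PlotReport above.
        report({'CL_Loss': cl_loss, 'AM_Loss': am_loss,
                'TotalLoss': total_loss}, model)
        model.cleargrads()
        total_loss.backward()
        optimizer.update()
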
Example 3
	pretrained_file = args.pretrained
	trained_file = args.trained
	device = args.device
	shuffle = args.shuffle
	whole = args.whole
	name = args.name
	N = args.no

	dataset = VOCSemanticSegmentationDataset()
	iterator = SerialIterator(dataset, 1, shuffle=shuffle, repeat=False)
	converter = chainer.dataset.concat_examples
	os.makedirs('viz/'+name, exist_ok=True)
	no_of_classes = 20
	device = 0  # NOTE: hard-coded; overrides the --device argument read above
	pretrained = FCN8s()
	trained = FCN8s()  # was `trainer`, but `trained` is what is loaded and used below
	load_npz(pretrained_file, pretrained)
	load_npz(trained_file, trained)
	
	if device >= 0:
		pretrained.to_gpu()
		trained.to_gpu()
	i = 0
	
	while not iterator.is_new_epoch:
		
		if not whole and i >= N:
			break

		image, labels = converter(iterator.next())
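
chainer.dataset.concat_examples (bound to converter above) stacks a list of per-example tuples into batched arrays, which is why a single next() call yields ready-to-use image and label batches:

import numpy as np
from chainer.dataset import concat_examples

# Two dummy (image, label) examples: CHW float images, HW int label maps.
batch = [(np.zeros((3, 4, 4), np.float32), np.zeros((4, 4), np.int32)),
         (np.ones((3, 4, 4), np.float32), np.ones((4, 4), np.int32))]
images, labels = concat_examples(batch)
print(images.shape, labels.shape)  # (2, 3, 4, 4) (2, 4, 4)
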
Example 4
import argparse
import os

import chainer
import fcn
from chainer.iterators import SerialIterator
from chainer.optimizers import Adam
from chainer.training import Trainer, extensions
from chainercv.datasets import VOCSemanticSegmentationDataset

from models import FCN8s  # project-local GAIN model (module path assumed)
from updaters import VOC_ClassificationUpdater  # project-local (module path assumed)


def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--gpu', type=int, default=0, help='gpu id')
    parser.add_argument('--modelfile', help='pretrained model file of FCN8')
    parser.add_argument('--lr',
                        type=float,
                        default=5 * 1e-4,
                        help='init learning rate')
    parser.add_argument('--name',
                        type=str,
                        default='exp',
                        help='name of the experiment')
    parser.add_argument('--resume',
                        type=int,
                        default=0,
                        help='resume training or not')
    parser.add_argument('--snapshot',
                        type=str,
                        help='snapshot file to resume from')

    args = parser.parse_args()

    resume = args.resume
    device = args.gpu

    if resume:
        load_snapshot_path = args.snapshot
        load_model_path = args.modelfile
    else:
        load_model_path = args.modelfile

    experiment = args.name
    lr = args.lr
    lr_trigger_interval = (5, 'epoch')
    optim = Adam

    os.makedirs('result/' + experiment, exist_ok=True)
    with open('result/' + experiment + '/details.txt', 'w+') as f:
        f.write("lr - " + str(lr) + "\n")
        f.write("optimizer - " + str(optim) + "\n")
        f.write("lr_trigger_interval - " + str(lr_trigger_interval) + "\n")

    if not resume:
        # Add the FC layers to the original FCN for GAIN
        model_own = FCN8s()
        model_original = fcn.models.FCN8s()
        model_file = fcn.models.FCN8s.download()
        chainer.serializers.load_npz(model_file, model_original)

        # Copy every registered link from the pretrained FCN into the GAIN
        # model (Chainer stores child link names in `_children`).
        for layers in model_original._children:
            setattr(model_own, layers, getattr(model_original, layers))
        del model_original, model_file

    else:
        model_own = FCN8s()
        chainer.serializers.load_npz(load_model_path, model_own)

    if device >= 0:
        model_own.to_gpu(device)

    dataset = VOCSemanticSegmentationDataset()
    iterator = SerialIterator(dataset, 1)
    optimizer = Adam(alpha=lr)
    optimizer.setup(model_own)

    updater = VOC_ClassificationUpdater(iterator, optimizer, device=device)
    trainer = Trainer(updater, (50, 'epoch'))
    log_keys = ['epoch', 'iteration', 'main/Loss']
    trainer.extend(
        extensions.LogReport(log_keys, (100, 'iteration'),
                             log_name='log' + experiment))
    trainer.extend(extensions.PrintReport(log_keys),
                   trigger=(100, 'iteration'))
    trainer.extend(
        extensions.ProgressBar(training_length=(50, 'epoch'),
                               update_interval=500))
    trainer.extend(extensions.snapshot(filename='snapshot' + experiment),
                   trigger=(5, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        trainer.updater._optimizers['main'].target, "model" + experiment),
                   trigger=(5, 'epoch'))
    trainer.extend(
        extensions.PlotReport(['main/Loss'],
                              'iteration', (100, 'iteration'),
                              file_name=experiment + '/loss.png',
                              grid=True,
                              marker=" "))

    if resume:
        chainer.serializers.load_npz(load_snapshot_path, trainer)
    print("Running - - ", experiment)
    print('initial lr ', lr)
    print('lr_trigger_interval ', lr_trigger_interval)
    trainer.run()
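
Note the two snapshot extensions above: extensions.snapshot serializes the whole trainer (model, optimizer state, iterator position) and is what --snapshot reloads, while extensions.snapshot_object saves only the model, which can later be passed as --modelfile. Restoring the standalone model is symmetric (path hypothetical, FCN8s as in the imports above):

import chainer

model = FCN8s()
chainer.serializers.load_npz('result/modelexp', model)
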
Example 5
import argparse
import os

import chainer
import fcn
from chainer.iterators import SerialIterator
from chainer.optimizers import Adam
from chainer.training import Trainer, extensions
from chainercv.datasets import VOCSemanticSegmentationDataset

from models import FCN8s  # project-local GAIN model (module path assumed)
from updaters import VOC_ClassificationUpdater  # project-local (module path assumed)


def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--device', type=int, default=-1, help='gpu id')
    parser.add_argument('--lr_init',
                        type=float,
                        default=5 * 1e-5,
                        help='init learning rate')
    # parser.add_argument('--lr_trigger', type=float, default=5, help='trigger to decreace learning rate')
    # parser.add_argument('--lr_target', type=float, default=5*1e-5, help='target learning rate')
    # parser.add_argument('--lr_factor', type=float, default=.75, help='decay factor')
    parser.add_argument('--name',
                        type=str,
                        default='classifier',
                        help='name of the experiment')
    # NOTE: type=bool is unreliable with argparse (any non-empty string,
    # including 'False', parses as True), so use an int flag instead.
    parser.add_argument('--resume',
                        type=int,
                        default=0,
                        help='resume training or not')
    parser.add_argument('--snapshot',
                        type=str,
                        help='snapshot file of the trainer to resume from')

    args = parser.parse_args()

    resume = args.resume
    device = args.device

    if resume:
        load_snapshot_path = args.snapshot

    experiment = args.name
    lr_init = args.lr_init
    # lr_target = args.lr_target
    # lr_factor = args.lr_factor
    # lr_trigger_interval = (args.lr_trigger, 'epoch')

    os.makedirs('result/' + experiment, exist_ok=True)
    with open('result/' + experiment + '/details.txt', 'w+') as f:
        f.write("lr - " + str(lr_init) + "\n")
        f.write("optimizer - " + str(Adam) + "\n")
        # f.write("lr_trigger_interval - " + str(lr_trigger_interval) + "\n")

    if not resume:
        # Add the FC layers to original FCN for GAIN
        model_own = FCN8s()
        model_original = fcn.models.FCN8s()
        model_file = fcn.models.FCN8s.download()
        chainer.serializers.load_npz(model_file, model_original)

        # Copy every registered link from the pretrained FCN into the GAIN
        # model (Chainer stores child link names in `_children`).
        for layers in model_original._children:
            setattr(model_own, layers, getattr(model_original, layers))
        del model_original, model_file

    else:
        model_own = FCN8s()
        # Model weights are restored below together with the rest of the
        # trainer state, via load_npz(load_snapshot_path, trainer).

    if device >= 0:
        model_own.to_gpu(device)

    dataset = VOCSemanticSegmentationDataset()
    iterator = SerialIterator(dataset, 1)
    optimizer = Adam(alpha=lr_init)
    optimizer.setup(model_own)

    updater = VOC_ClassificationUpdater(iterator, optimizer, device=device)
    trainer = Trainer(updater, (100, 'epoch'))
    log_keys = ['epoch', 'iteration', 'main/Loss']
    trainer.extend(
        extensions.LogReport(log_keys, (100, 'iteration'),
                             log_name='log_' + experiment))
    trainer.extend(extensions.PrintReport(log_keys),
                   trigger=(100, 'iteration'))
    trainer.extend(extensions.snapshot(filename=experiment +
                                       "_snapshot_{.updater.iteration}"),
                   trigger=(5, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        trainer.updater._optimizers['main'].target,
        experiment + "_model_{.updater.iteration}"),
                   trigger=(5, 'epoch'))
    trainer.extend(
        extensions.PlotReport(['main/Loss'],
                              'iteration', (100, 'iteration'),
                              file_name='trainer_' + experiment + '/loss.png',
                              grid=True,
                              marker=" "))

    # trainer.extend(extensions.ExponentialShift('lr', lr_factor, target=lr_target), trigger=lr_trigger_interval)
    if resume:
        chainer.serializers.load_npz(load_snapshot_path, trainer)

    print("Running - - ", experiment)
    print('initial lr ', lr_init)
    # print('lr_trigger_interval ', lr_trigger_interval)
    trainer.run()
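
A typical command-line entry point for these scripts (file name in the comment hypothetical):

if __name__ == '__main__':
    # e.g.: python train_classifier.py --device 0 --lr_init 5e-5 --name classifier
    main()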