Example #1
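All five snippets come from the same project and share a common header. The
standard-library and PyTorch imports below would make them resolve; every
other name (`args`, `logger`, `imageLabelLoader`, `imageLoader`, `makedirs`,
the `deeplab*` models, `ConfusionMatrix`, `update_confusion_matrix`,
`validate`, `train`, `get_parameters`, `weights_init`, `save_checkpoint`) is
assumed to be defined in the repository itself:

import os
import time

import torch
import torch.nn as nn
from torch.utils import data
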
def main():

    makedirs.mkdirs(os.path.join(args['checkpoints_dir'], args['name']))  # project helper, akin to os.makedirs(path, exist_ok=True)
    if len(args['device_ids']) > 0:
        torch.cuda.set_device(args['device_ids'][0])

    A_train_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                      dataName=args['domainA'],
                                                      phase='train'),
                                     batch_size=args['batch_size'],
                                     num_workers=args['num_workers'],
                                     shuffle=True)
    A_val_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                    dataName=args['domainA'],
                                                    phase='val'),
                                   batch_size=args['batch_size'],
                                   num_workers=args['num_workers'],
                                   shuffle=False)

    B_train_loader = data.DataLoader(imageLoader(args['data_path'],
                                                 dataName=args['domainB'],
                                                 phase='train'),
                                     batch_size=args['batch_size'],
                                     num_workers=args['num_workers'],
                                     shuffle=True)
    B_val_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                    dataName=args['domainB'],
                                                    phase='val'),
                                   batch_size=args['batch_size'],
                                   num_workers=args['num_workers'],
                                   shuffle=False)
    model = deeplabGan()
    model.initialize(args)

    # multi GPUS
    # model = torch.nn.DataParallel(model,device_ids=args['device_ids']).cuda()
    best_prec = 0
    for epoch in range(args['n_epoch']):
        train(A_train_loader, B_train_loader, model, epoch)
        if epoch % 2 == 0:
            # reduction='sum' is the current spelling of the deprecated
            # size_average=False. The domain-A pass is kept for its side
            # effects only; `prec` is overwritten below, so the best
            # checkpoint tracks the domain-B score.
            prec = validate(A_val_loader, model,
                            nn.CrossEntropyLoss(reduction='sum'), False)
            prec = validate(B_val_loader, model,
                            nn.CrossEntropyLoss(reduction='sum'), True)
            is_best = prec > best_prec
            best_prec = max(prec, best_prec)
            if is_best:
                model.save('best')
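
`validate` is project code and is not shown on this page. Judging only from
the call sites above (a loader, the model, a summed cross-entropy criterion,
and a flag selecting the adapted branch, compared as a scalar score), a
compatible sketch might look like the following; the body is an assumption,
not the project's implementation, and the variant used in example #4
evidently returns a dict of metrics instead:

def validate(val_loader, model, criterion, use_adapted):
    """Hypothetical sketch: score a model on one validation split."""
    model.eval()
    matrix = ConfusionMatrix(args['label_nums'])
    with torch.no_grad():
        for image, label in val_loader:
            label = label.cuda(non_blocking=True)
            model.test(use_adapted, image)  # signature guessed from example #2
            matrix = update_confusion_matrix(matrix, model.output.data, label)
    model.train()
    return matrix.avg_f1score()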
Example #2
def main():
    if len(args['device_ids']) > 0:
        torch.cuda.set_device(args['device_ids'][0])

    test_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                   dataName=args['domainB'],
                                                   phase='val'),
                                  batch_size=args['batch_size'],
                                  num_workers=args['num_workers'],
                                  shuffle=False)
    gym = deeplabGanS2TWithRefine4()
    gym.initialize(args)
    gym.load(
        '/home/ben/mathfinder/PROJECT/AAAI2017/our_Method/v3/deeplab_feature_adaptation/checkpoints/Lip_to_July_g1/best_Ori_on_B_model.pth'
    )
    gym.eval()
    matrix = ConfusionMatrix(args['label_nums'])
    for i, (image, label) in enumerate(test_loader):
        label = label.cuda(non_blocking=True)  # async= became a reserved word in Python 3.7

        with torch.no_grad():  # inference only; no autograd state needed
            gym.test(False, image)
        output = gym.output

        matrix = update_confusion_matrix(matrix, output.data, label)
    print(matrix.avg_f1score())
    print(matrix.f1score())
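
`ConfusionMatrix` and `update_confusion_matrix` are also project helpers.
From the calls above (constructed with the number of labels, updated with raw
segmentation output and integer labels, then queried for accuracy/F1), a
compatible sketch could be the following; it is illustrative only, and
`matrix.add` is a guessed method name:

def update_confusion_matrix(matrix, output, label):
    """Hypothetical sketch: fold one batch into the running matrix.

    output: (N, C, H, W) segmentation logits; label: (N, H, W) class ids.
    """
    pred = output.argmax(dim=1)  # per-pixel predicted class
    matrix.add(pred.view(-1).cpu(), label.view(-1).cpu())  # assumed API
    return matrix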
Example #3
def main():
    if len(args['device_ids']) > 0:
        torch.cuda.set_device(args['device_ids'][0])

    test_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                   dataName=args['domainB'],
                                                   phase='test'),
                                  batch_size=args['batch_size'],
                                  num_workers=args['num_workers'],
                                  shuffle=False)
    gym = deeplabG1G2()
    gym.initialize(args)
    gym.load('/home/ben/mathfinder/PROJECT/AAAI2017/our_Method/v3/deeplab_feature_adaptation/checkpoints/lr_g1=0.00001_lr_g2=0.00000001_interval_g1=6_interval_d1=6_net_D=lsganMultOutput_D_if_adaptive=True/best_Ori_on_B_model.pth')
    gym.eval()
    matrix = ConfusionMatrix(args['label_nums'])
    for i, (image, label) in enumerate(test_loader):
        label = label.cuda(non_blocking=True)  # async= became a reserved word in Python 3.7

        with torch.no_grad():  # inference only; no autograd state needed
            gym.test(image)
        output = gym.output

        matrix = update_confusion_matrix(matrix, output.data, label)
    print(matrix.all_acc())
Example #4
def main():

    makedirs.mkdirs(os.path.join(args['checkpoints_dir'], args['name']))
    if len(args['device_ids']) > 0:
        torch.cuda.set_device(args['device_ids'][0])

    A_train_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                      dataName=args['domainA'],
                                                      phase='train'),
                                     batch_size=args['batch_size'],
                                     num_workers=args['num_workers'],
                                     shuffle=True)
    A_val_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                    dataName=args['domainA'],
                                                    phase='val'),
                                   batch_size=args['batch_size'],
                                   num_workers=args['num_workers'],
                                   shuffle=False)

    B_train_loader = data.DataLoader(imageLoader(args['data_path'],
                                                 dataName=args['domainB'],
                                                 phase='train+unlabel'),
                                     batch_size=args['batch_size'],
                                     num_workers=args['num_workers'],
                                     shuffle=True)
    B_val_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                    dataName=args['domainB'],
                                                    phase='val'),
                                   batch_size=args['batch_size'],
                                   num_workers=args['num_workers'],
                                   shuffle=False)
    model = deeplabGanWithRefine()
    model.initialize(args)

    # multi GPUS
    # model = torch.nn.DataParallel(model,device_ids=args['device_ids']).cuda()
    Iter = 0
    if args['resume']:
        if os.path.isfile(args['resume']):
            logger.info("=> loading checkpoint '{}'".format(args['resume']))
            model.load(args['resume'])
        else:
            print("=> no checkpoint found at '{}'".format(args['resume']))

    best_Ori_on_B = 0
    best_Ada_on_B = 0
    model.train()
    B_iter = iter(B_train_loader)  # one persistent iterator over domain B
    for epoch in range(args['n_epoch']):
        # train(A_train_loader, B_train_loader, model, epoch)
        # switch to train mode
        for i, (A_image, A_label) in enumerate(A_train_loader):
            Iter += 1
            # next(iter(B_train_loader)) would rebuild the iterator on every
            # step and yield the same first batch forever; advance the shared
            # iterator instead.
            try:
                B_image = next(B_iter)
            except StopIteration:  # domain B exhausted; restart its loader
                B_iter = iter(B_train_loader)
                B_image = next(B_iter)
            model.set_input({'A': A_image, 'A_label': A_label, 'B': B_image})
            model.optimize_parameters()
            output = model.output
            if i % args['print_freq'] == 0:
                matrix = ConfusionMatrix()
                update_confusion_matrix(matrix, output.data, A_label)
                logger.info('Time: {time}\t'
                            'Epoch/Iter: [{epoch}/{Iter}]\t'
                            'loss: {loss:.4f}\t'
                            'loss_R: {loss_R:.4f}\t'
                            'acc: {accuracy:.4f}\t'
                            'fg_acc: {fg_accuracy:.4f}\t'
                            'avg_prec: {avg_precision:.4f}\t'
                            'avg_rec: {avg_recall:.4f}\t'
                            'avg_f1: {avg_f1score:.4f}\t'
                            'loss_G: {loss_G:.4f}\t'
                            'loss_D: {loss_D:.4f}\t'.format(
                                time=time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
                                epoch=epoch, Iter=Iter,
                                loss=model.loss_P.item(),  # .data[0] fails on 0-dim tensors
                                loss_R=model.loss_R.item(),
                                accuracy=matrix.accuracy(),
                                fg_accuracy=matrix.fg_accuracy(),
                                avg_precision=matrix.avg_precision(),
                                avg_recall=matrix.avg_recall(),
                                avg_f1score=matrix.avg_f1score(),
                                loss_G=model.loss_G.item(),
                                loss_D=model.loss_D.item()))

            if Iter % 1000 == 0:
                model.eval()
                acc_Ori_on_A = validate(A_val_loader, model,
                                        nn.CrossEntropyLoss(reduction='sum'), False)
                acc_Ori_on_B = validate(B_val_loader, model,
                                        nn.CrossEntropyLoss(reduction='sum'), False)
                acc_Ada_on_B = validate(B_val_loader, model,
                                        nn.CrossEntropyLoss(reduction='sum'), True)
                prec_Ori_on_B = acc_Ori_on_B['avg_f1score']
                prec_Ada_on_B = acc_Ada_on_B['avg_f1score']

                is_best = prec_Ori_on_B > best_Ori_on_B
                best_Ori_on_B = max(prec_Ori_on_B, best_Ori_on_B)
                if is_best:
                    model.save('best_Ori_on_B', Iter=Iter, epoch=epoch,
                               acc={'acc_Ori_on_A': acc_Ori_on_A,
                                    'acc_Ori_on_B': acc_Ori_on_B,
                                    'acc_Ada_on_B': acc_Ada_on_B})

                is_best = prec_Ada_on_B > best_Ada_on_B
                best_Ada_on_B = max(prec_Ada_on_B, best_Ada_on_B)
                if is_best:
                    model.save('best_Ada_on_B', Iter=Iter, epoch=epoch,
                               acc={'acc_Ori_on_A': acc_Ori_on_A,
                                    'acc_Ori_on_B': acc_Ori_on_B,
                                    'acc_Ada_on_B': acc_Ada_on_B})
                model.train()
Example #5
def main():
    train_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                    dataName=args['domainB'],
                                                    phase='train'),
                                   batch_size=args['batch_size'],
                                   num_workers=args['num_workers'],
                                   shuffle=True)
    val_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                  dataName=args['domainB'],
                                                  phase='val'),
                                 batch_size=args['batch_size'],
                                 num_workers=args['num_workers'],
                                 shuffle=False)
    model = Deeplab()
    print(model)
    if args['pretrain_model'] != '':
        pretrained_dict = torch.load(args['weigths_pool'] + '/' +
                                     args['pretrain_model'])
        model.weights_init(pretrained_dict=pretrained_dict)
    else:
        # apply() expects the callable itself; assuming the usual
        # `def weights_init(m):` signature, it must not be called here.
        model.apply(weights_init)

    # The fc8_* heads are freshly initialized, so they train at higher rates:
    # weights at 10x the base LR, biases at 20x (the standard DeepLab recipe).
    fc8_layers = [model.fc8_1, model.fc8_2, model.fc8_3, model.fc8_4]
    ignored_params = {id(p) for fc8 in fc8_layers for p in fc8.parameters()}
    base_params = [p for p in model.parameters()
                   if id(p) not in ignored_params]

    optimizer = torch.optim.SGD(
        [{'params': base_params}]
        + [{'params': get_parameters(fc8, 'weight'), 'lr': args['l_rate'] * 10}
           for fc8 in fc8_layers]
        + [{'params': get_parameters(fc8, 'bias'), 'lr': args['l_rate'] * 20}
           for fc8 in fc8_layers],
        lr=args['l_rate'],
        momentum=0.9,
        weight_decay=5e-4)
    criterion = nn.CrossEntropyLoss(reduction='sum').cuda()  # summed, not averaged
    # multi GPUS
    model = torch.nn.DataParallel(model, device_ids=args['device_ids']).cuda()
    best_prec = 0
    for epoch in range(args['n_epoch']):
        train(train_loader, model, criterion, optimizer, epoch)

        if epoch > 0 and epoch % 9 == 0:
            prec = validate(val_loader, model, criterion)
            is_best = prec > best_prec
            best_prec = max(prec, best_prec)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': 'deeplab(indoor)',
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec,
                    'optimizer': optimizer.state_dict(),
                },
                is_best,
                filename='./checkpoint/indoor_epoch_' + str(epoch) +
                '.pth.tar')
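
`weights_init` is referenced in example #5 but never defined in these
snippets. Below is a minimal sketch of the per-module initializer that
`model.apply(weights_init)` conventionally expects; the normal/constant
scheme is the common DCGAN-style default, assumed rather than taken from
the project:

def weights_init(m):
    """Hypothetical per-module initializer for model.apply(weights_init)."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight, 0.0, 0.02)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.constant_(m.bias, 0.0)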