Example No. 1
def main():
    print('Training Process\nInitializing...\n')
    config.init_env()

    val_dataset = data_pth.pc_data(config.pc_net.data_root, status=STATUS_TEST)

    val_loader = DataLoader(val_dataset, batch_size=config.pc_net.validation.batch_sz,
                            num_workers=config.num_workers, shuffle=True, drop_last=True)

    # create model
    net = DGCNN(n_neighbor=config.pc_net.n_neighbor, num_classes=config.pc_net.num_classes)
    net = torch.nn.DataParallel(net)
    net = net.to(device=config.device)
    optimizer = optim.Adam(net.parameters(), config.pc_net.train.lr,
                           weight_decay=config.pc_net.train.weight_decay)

    print(f'loading pretrained model from {config.pc_net.ckpt_file}')
    checkpoint = torch.load(config.pc_net.ckpt_file)
    net.module.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    best_prec1 = checkpoint['best_prec1']
    resume_epoch = checkpoint['epoch']

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 5, 0.5)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(device=config.device)

    # for p in net.module.feature.parameters():
    #     p.requires_grad = False

    with torch.no_grad():
        prec1 = validate(val_loader, net, resume_epoch)

    print('curr accuracy: ', prec1)
    print('best accuracy: ', best_prec1)
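
The `validate` helper is not defined in this snippet. A minimal sketch of what it plausibly does, assuming each batch is a (points, labels) pair and top-1 accuracy is returned as a percentage:

# sketch of the assumed validate() helper; the (points, labels) batch layout
# is an assumption, and `epoch` is kept only to match the call site above
def validate(val_loader, net, epoch):
    net.eval()
    correct, total = 0, 0
    for points, labels in val_loader:
        points = points.to(config.device)
        labels = labels.to(config.device)
        preds = net(points).argmax(dim=1)  # index of the highest logit
        correct += (preds == labels).sum().item()
        total += labels.size(0)
    return 100.0 * correct / total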
print("train len: " + str(len(src_train_loader) * args.batch_size))
'''
src_train_loader = DataLoader(src_trainset, num_workers=NWORKERS, batch_size=args.batch_size,
                               sampler=src_train_sampler, drop_last=True)
src_val_loader = DataLoader(src_trainset, num_workers=NWORKERS, batch_size=args.test_batch_size,
                             sampler=src_valid_sampler)
src_test_loader = DataLoader(src_testset, num_workers=NWORKERS, batch_size=args.test_batch_size)
'''
# ==================
# Init Model
# ==================
if args.model == 'pointnet':
    model = PointNet(args)
elif args.model == 'dgcnn':
    print(args.emb_dims)
    model = DGCNN(args, output_channels=64, inf=True)
else:
    raise Exception("Not implemented")

if len(args.weights) > 0:
    model.load_state_dict(
        torch.load(args.weights, map_location=lambda storage, loc: storage),
        strict=False)
    print("Loaded pretrained weights!")

# model.linear3 = nn.Linear(256, 40)  # only for classification
model = model.to(device)

# freeze everything except the linear-evaluation head
for n, p in model.named_parameters():
    if not n.startswith("linearEval"):
        p.requires_grad = False
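
With everything but the `linearEval` head frozen, only that head receives gradient updates. A short hypothetical continuation (the optimizer choice and its learning rate are assumptions, not from the snippet) that restricts the optimizer to the trainable parameters and verifies the freeze:

# hypothetical follow-up: build the optimizer over the remaining trainable
# parameters only (Adam and lr=1e-3 are assumed values)
trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(trainable, lr=1e-3)

# sanity check: only the linearEval head should still require gradients
for n, p in model.named_parameters():
    if p.requires_grad:
        print("trainable:", n, tuple(p.shape))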
Example No. 3
#src_test_loader = DataLoader(src_testset, num_workers=NWORKERS, batch_size=args.test_batch_size, shuffle=True, drop_last=True)
print("train len: " + str(len(src_train_loader) * args.batch_size))
'''
src_train_loader = DataLoader(src_trainset, num_workers=NWORKERS, batch_size=args.batch_size,
                               sampler=src_train_sampler, drop_last=True)
src_val_loader = DataLoader(src_trainset, num_workers=NWORKERS, batch_size=args.test_batch_size,
                             sampler=src_valid_sampler)
src_test_loader = DataLoader(src_testset, num_workers=NWORKERS, batch_size=args.test_batch_size)
'''
# ==================
# Init Model
# ==================
if args.model == 'pointnet':
    model = PointNet(args)
elif args.model == 'dgcnn':
    model = DGCNN(args, output_channels=32, inf=False)
else:
    raise Exception("Not implemented")

if len(args.weights) > 0:
    model.load_state_dict(
        torch.load(args.weights, map_location=lambda storage, loc: storage),
        strict=False)
    print("Loaded pretrained weights!")

# model.linear3 = nn.Linear(256, 40)  # only for classification
model = model.to(device)
'''
for n, p in model.named_parameters():
    if not n.startswith("linearEval"):
        p.requires_grad = False
'''
Example No. 4
def main():
    print('Training Process\nInitializing...\n')
    config.init_env()

    train_dataset = data_pth.pc_data(config.pc_net.data_root,
                                     status=STATUS_TRAIN)
    val_dataset = data_pth.pc_data(config.pc_net.data_root, status=STATUS_TEST)

    train_loader = DataLoader(train_dataset,
                              batch_size=config.pc_net.train.batch_sz,
                              num_workers=config.num_workers,
                              shuffle=True,
                              drop_last=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=config.pc_net.validation.batch_sz,
                            num_workers=config.num_workers,
                            shuffle=True,
                            drop_last=True)

    best_prec1 = 0
    resume_epoch = 0
    best_map = 0
    # create model
    net = DGCNN(n_neighbor=config.pc_net.n_neighbor,
                num_classes=config.pc_net.num_classes)
    net = torch.nn.DataParallel(net)
    net = net.to(device=config.device)
    optimizer = optim.Adam(net.parameters(),
                           config.pc_net.train.lr,
                           weight_decay=config.pc_net.train.weight_decay)

    if config.pc_net.train.resume:
        print(f'loading pretrained model from {config.pc_net.ckpt_file}')
        checkpoint = torch.load(config.pc_net.ckpt_file)
        # strip the 'module.' prefix that DataParallel adds to parameter names
        net.module.load_state_dict(
            {k[7:]: v for k, v in checkpoint['model'].items()})
        optimizer.load_state_dict(checkpoint['optimizer'])
        best_prec1 = checkpoint['best_prec1']
        if config.pc_net.train.resume_epoch is not None:
            resume_epoch = config.pc_net.train.resume_epoch
        else:
            resume_epoch = checkpoint['epoch'] + 1

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 20, 0.7)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(device=config.device)

    for epoch in range(resume_epoch, config.pc_net.train.max_epoch):

        # step the scheduler once per epoch (passing an explicit `epoch`
        # argument is deprecated in recent PyTorch releases)
        lr_scheduler.step(epoch=epoch)
        # train
        train(train_loader, net, criterion, optimizer, epoch)
        # validation
        with torch.no_grad():
            prec1, retrieval_map = validate(val_loader, net, epoch)

        # save checkpoints
        if prec1 > best_prec1:
            best_prec1 = prec1
            save_ckpt(epoch, best_prec1, net, optimizer)

        if retrieval_map > best_map:
            best_map = retrieval_map

        # save_record(epoch, prec1, net.module)
        print('curr accuracy: ', prec1)
        print('best accuracy: ', best_prec1)
        print('best map: ', best_map)

    print('Train Finished!')
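
`save_ckpt` is not shown, but the resume branch above fixes its contract: the checkpoint must contain 'model', 'optimizer', 'best_prec1', and 'epoch' entries. A minimal sketch consistent with those keys (writing to config.pc_net.ckpt_file is an assumption):

# sketch of the assumed save_ckpt helper; key names mirror what the resume
# branch reads, and the destination path is an assumption
def save_ckpt(epoch, best_prec1, net, optimizer):
    torch.save({
        'epoch': epoch,
        'best_prec1': best_prec1,
        'model': net.state_dict(),  # DataParallel prefixes keys with 'module.'
        'optimizer': optimizer.state_dict(),
    }, config.pc_net.ckpt_file)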
Example No. 5
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)



    # load data
    print('==> Preparing dataset %s' % args.dataset)
    features, landmarks, labels = pickle_2_img_and_landmark(args.dataset_path)
    num_classes = 6

    # Model
    print("==> creating model '{}'".format(args.arch))
    model = DGCNN(num_classes=num_classes)

    # model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    # print('    resnet params: %.2fM' % (sum(p.numel() for p in model.resnet.parameters())/1000000.0))
    # print('    stgcn params: %.2fM' % (sum(p.numel() for p in model.st_gcn.parameters())/1000000.0))
    criterion = nn.CrossEntropyLoss()

    # layer-wise optimization (separate lr / weight decay per parameter group)
    # resnet_para = [model.conv1.parameters(), model.layer1.parameters(), model.layer2.parameters(), model.layer3.parameters(), model.layer4.parameters()]
    # optimizer = optim.SGD([
    #     {'params': model.gcn11.parameters()}, 
    #     {'params': model.gcn12.parameters()}, 
    #     {'params': model.gcn21.parameters()}, 
    #     {'params': model.gcn22.parameters()}, 
    #     {'params': model.gcn31.parameters()}, 
    #     {'params': model.gcn32.parameters()}, 
    #     {'params': model.fc.parameters()}, 
    #     {'params': model.conv1.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
    #     {'params': model.bn1.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
    #     {'params': model.layer1.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
    #     {'params': model.layer2.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
    #     {'params': model.layer3.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
    #     {'params': model.layer4.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
    #     ], lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    # Resume
    title = 'ckp-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log_stat.log'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log_stat.log'), title=title)
        logger.set_names(['fold_num', 'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])

    # logging
    logging.basicConfig(level=logging.DEBUG,
                        filename=os.path.join(args.checkpoint, 'log_info.log'),
                        filemode='a+',
                        format="%(asctime)-15s %(levelname)-8s  %(message)s")
    # log configuration
    logging.info('-' * 10 + 'configuration' + '-' * 10)
    for arg in vars(args):
        logging.info((arg, str(getattr(args, arg))))

    acc_fold = []
    reset_lr = state['lr']
    for f_num in range(args.folds):
        state['lr'] = reset_lr
        model.reset_all_weights()
        # optimizer = optim.SGD([
        #     {'params': model.gcn11.parameters()}, 
        #     {'params': model.gcn12.parameters()}, 
        #     {'params': model.gcn21.parameters()}, 
        #     {'params': model.gcn22.parameters()}, 
        #     {'params': model.gcn31.parameters()}, 
        #     {'params': model.gcn32.parameters()}, 
        #     {'params': model.fc.parameters()}, 
        #     {'params': model.conv1.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
        #     {'params': model.bn1.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
        #     {'params': model.layer1.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
        #     {'params': model.layer2.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
        #     {'params': model.layer3.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
        #     {'params': model.layer4.parameters(), 'lr': 0.005, 'weight_decay': 1e-3},
        #     ], lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        print(args.lr)
        # save each fold's acc and reset configuration
        average_acc = 0
        best_acc = 0
    

        # 10-fold cross validation
        train_x, train_lm, train_y = [], [], []
        test_x, test_lm, test_y = [], [], []
        for id_fold in range(args.folds):
            if id_fold == f_num:
                test_x = features[id_fold]
                test_lm = landmarks[id_fold]
                test_y = labels[id_fold]
            else:
                train_x = train_x + features[id_fold]
                train_lm = train_lm + landmarks[id_fold]
                train_y = train_y + labels[id_fold]
        # convert array to tensor
        train_x = torch.tensor(train_x, dtype=torch.float) / 255.0  # (b_s, 128, 128)
        train_x = train_x.unsqueeze(1)  # (b_s, 1, 128, 128)

        train_lm = np.stack(train_lm)
        # only the coordinate information is needed; no normalization required
        train_lm = (train_lm - np.mean(train_lm, axis=0)) / np.std(train_lm, axis=0)
        train_lm = torch.tensor(train_lm, dtype=torch.float)
        # train_lm = train_lm.unsqueeze(2)

        test_x = torch.tensor(test_x, dtype=torch.float) / 255.0
        test_x = test_x.unsqueeze(1)
        # only the coordinate information is needed; no normalization required
        test_lm = (test_lm - np.mean(test_lm, axis=0)) / np.std(test_lm, axis=0)
        test_lm = torch.tensor(test_lm, dtype=torch.float)
        # test_lm = test_lm.unsqueeze(2)
        train_y, test_y = torch.tensor(train_y), torch.tensor(test_y)

        train_dataset = torch.utils.data.TensorDataset(train_x, train_lm, train_y)
        train_iter = torch.utils.data.DataLoader(
            dataset=train_dataset,
            batch_size=args.train_batch,
            shuffle=True
        )

        test_dataset = torch.utils.data.TensorDataset(test_x, test_lm, test_y)
        test_iter = torch.utils.data.DataLoader(
            dataset=test_dataset,
            batch_size=args.test_batch,
            shuffle=False
        )

        # test for fold order
        print(len(test_dataset))

        if args.evaluate:
            print('\nEvaluation only')
            # `+` on tensors is element-wise addition, not concatenation, and
            # test() expects a DataLoader, so evaluate over a combined loader
            eval_set = torch.utils.data.TensorDataset(
                torch.cat([train_x, test_x]), torch.cat([train_lm, test_lm]),
                torch.cat([train_y, test_y]))
            eval_iter = torch.utils.data.DataLoader(eval_set, batch_size=args.test_batch, shuffle=False)
            test_loss, test_acc = test(eval_iter, model, criterion, start_epoch, use_cuda)
            print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
            continue

        # show plt
        # plt.show(block=False)

        # Train and val
        for epoch in range(start_epoch, args.epochs):
            
            # adjust the learning rate at specific epochs
            adjust_learning_rate(optimizer, epoch)
            # print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
            print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, optimizer.param_groups[0]['lr']))

            train_loss, train_acc = train(train_iter, model, criterion, optimizer, epoch, use_cuda)
            test_loss, test_acc = test(test_iter, model, criterion, epoch, use_cuda)

            # append logger file
            logger.append([f_num, state['lr'], train_loss, test_loss, train_acc, test_acc])

            # save model
            is_best = test_acc > best_acc
            best_acc = max(test_acc, best_acc)
            save_checkpoint({
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'acc': test_acc,
                    'best_acc': best_acc,
                    'optimizer' : optimizer.state_dict(),
                }, is_best, f_num, checkpoint=args.checkpoint)

        # compute average acc
        acc_fold.append(best_acc)
        average_acc = sum(acc_fold) / len(acc_fold)

        logging.info('fold: %d, best_acc: %.2f, average_acc: %.2f' % (f_num, best_acc, average_acc))
    logger.close()
    # logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    logging.info('acc_fold' + str(acc_fold))
    print('average acc:')
    print(average_acc)
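
`adjust_learning_rate` and the global `state` dict come from the surrounding training script and are not shown. A common pattern consistent with the per-fold lr reset above, assuming args.schedule (a list of decay epochs) and args.gamma (the decay factor) exist:

# sketch of the assumed adjust_learning_rate helper: decay state['lr'] at
# scheduled epochs (args.schedule and args.gamma are assumed names)
def adjust_learning_rate(optimizer, epoch):
    global state
    if epoch in args.schedule:
        state['lr'] *= args.gamma
        for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr']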