Example #1
def test(config):
    log_dir = os.path.join(config.log_dir, config.name + '_stage_2')

    val_path = os.path.join(config.data, "*/test")

    val_dataset = MultiviewImgDataset(val_path,
                                      scale_aug=False,
                                      rot_aug=False,
                                      num_views=config.num_views)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0)

    pretraining = not config.no_pretraining
    cnet = SVCNN(config.name,
                 nclasses=config.num_classes,
                 cnn_name=config.cnn_name,
                 pretraining=pretraining)

    cnet_2 = MVCNN(config.name,
                   cnet,
                   nclasses=config.num_classes,
                   cnn_name=config.cnn_name,
                   num_views=config.num_views)
    cnet_2.load(
        os.path.join(log_dir, config.snapshot_prefix + str(config.weights)))
    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay,
                           betas=(0.9, 0.999))

    trainer = ModelNetTrainer(cnet_2,
                              None,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=config.num_views)

    labels, predictions = trainer.update_validation_accuracy(config.weights,
                                                             test=True)
    import Evaluation_tools as et
    eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
    et.write_eval_file(config.data, eval_file, predictions, labels,
                       config.name)
    et.make_matrix(config.data, eval_file, config.log_dir)
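
All of these examples share the same two-stage pipeline: an SVCNN is trained on single images first, and its trunk is then reused as the per-view feature extractor of an MVCNN. The sketch below illustrates the view-pooling idea at the core of that second stage; the module name, shapes, and num_views handling are assumptions for illustration, not the repository's exact implementation.

import torch
import torch.nn as nn

class ViewPoolingSketch(nn.Module):
    """Illustrative max view pooling: V views per object are stacked along the batch
    dimension, encoded independently, then reduced with an element-wise max."""

    def __init__(self, backbone, num_views=12):
        super().__init__()
        self.backbone = backbone      # per-view feature extractor (e.g. the SVCNN trunk)
        self.num_views = num_views

    def forward(self, x):             # x: (N * V, C, H, W)
        feats = self.backbone(x)      # (N * V, C', H', W')
        nv, c, h, w = feats.shape
        feats = feats.view(-1, self.num_views, c, h, w)   # (N, V, C', H', W')
        pooled, _ = feats.max(dim=1)                      # max over the view dimension
        return pooled.flatten(1)                          # (N, C' * H' * W') for the classifier
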
Example #2
    n_models_train = args.num_models*args.num_views

    train_dataset = SingleImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0)

    val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
    trainer.train(30)

    # STAGE 2
    log_dir = args.name+'_stage_2'
    create_folder(log_dir)
    cnet_2 = MVCNN(args.name, cnet, nclasses=40, cnn_name=args.cnn_name, num_views=args.num_views)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))
    
    train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)  # shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views,test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'mvcnn', log_dir, num_views=args.num_views)
    trainer.train(30)
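
The shuffle=False comment above is worth unpacking: MultiviewImgDataset lists its files view-by-view per object, so letting the DataLoader shuffle individual images would mix views of different objects. Shuffling therefore has to happen at the object level inside the trainer. A minimal sketch of that idea (not necessarily this trainer's exact code) permutes whole blocks of num_views consecutive file paths before each epoch:

import numpy as np

def shuffle_by_object(filepaths, num_views, rng=np.random):
    """Permute whole objects (blocks of num_views consecutive paths) while keeping
    each object's views contiguous, so the DataLoader can stay at shuffle=False."""
    num_objects = len(filepaths) // num_views
    order = rng.permutation(num_objects)
    shuffled = []
    for idx in order:
        shuffled.extend(filepaths[idx * num_views:(idx + 1) * num_views])
    return shuffled

# e.g. at the start of every epoch:
# train_loader.dataset.filepaths = shuffle_by_object(train_loader.dataset.filepaths, args.num_views)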

Example #3
File: train.py  Project: Alobal123/diplomka
def train(config):
    log(config.log_file, 'Starting...')
    pretraining = not config.no_pretraining
    log_dir = config.name
    create_folder(config.name)

    log(config.log_file, '--------------stage 1--------------')
    # STAGE 1
    log_dir = os.path.join(config.log_dir, config.name + '_stage_1')
    create_folder(log_dir)
    cnet = SVCNN(config, pretraining=pretraining)

    optimizer = optim.Adam(cnet.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay)
    train_path = os.path.join(config.data, "*/train")
    train_dataset = SingleImgDataset(train_path,
                                     config,
                                     scale_aug=False,
                                     rot_aug=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.stage1_batch_size,
        shuffle=True,
        num_workers=0)

    val_path = os.path.join(config.data, "*/test")
    val_dataset = SingleImgDataset(val_path,
                                   config,
                                   scale_aug=False,
                                   rot_aug=False,
                                   test_mode=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage1_batch_size,
        shuffle=False,
        num_workers=0)

    log(config.log_file,
        'num_train_files: ' + str(len(train_dataset.filepaths)))
    log(config.log_file, 'num_val_files: ' + str(len(val_dataset.filepaths)))

    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=1)
    trainer.train(config, config.stage1_batch_size)
    #cnet.load(os.path.join(log_dir, config.snapshot_prefix + str(30)))

    # STAGE 2
    log(config.log_file, '--------------stage 2--------------')
    log_dir = os.path.join(config.log_dir, config.name + '_stage_2')
    create_folder(log_dir)
    cnet_2 = MVCNN(cnet, config)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay,
                           betas=(0.9, 0.999))

    train_dataset = MultiviewImgDataset(train_path,
                                        config,
                                        scale_aug=False,
                                        rot_aug=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0
    )  # shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(val_path,
                                      config,
                                      scale_aug=False,
                                      rot_aug=False)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0)
    log(config.log_file,
        'num_train_files: ' + str(len(train_dataset.filepaths)))
    log(config.log_file, 'num_val_files: ' + str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet_2,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=config.num_views)
    trainer.train(config, config.stage2_batch_size)
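
Example #3 reads everything from a single config object. A hypothetical namespace with the fields actually referenced in the function above (field names taken from the snippets, values purely illustrative) could look like this:

from argparse import Namespace

# Illustrative values only; the field names match those referenced in train(config).
config = Namespace(
    name='mvcnn',
    data='/path/to/dataset',        # expects <class>/train and <class>/test subfolders
    log_dir='./logs',
    log_file='./logs/train.log',
    no_pretraining=False,
    learning_rate=5e-5,
    weight_decay=0.0,
    stage1_batch_size=64,
    stage2_batch_size=8,
    num_views=12,
    snapshot_prefix='model-',
)
# train(config)
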
Example #4
		print('num_val_files: '+str(len(val_dataset.filepaths)))
		trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
		trainer.train(30)
	elif args.svcnn_training_mode == 'load':
		cnet.cuda()
		log_dir = args.prefix + 'ckpt/svcnn/model-00025.pth'
#		log_dir = ckpt_dir + '/20/mvcnn_stage_1/mvcnn/model-00025.pth'
		model = torch.load(log_dir)
		cnet.load_state_dict(model)
		print('SVCNN trained model loaded!')


	# STAGE 2
	log_dir = run_dir + '/' + args.name + '_stage_2'
	create_folder(log_dir)
	cnet_2 = MVCNN(args.name, cnet, nclasses=40, cnn_name=args.cnn_name, num_views=args.num_views, constraint=args.constraint, w_m=args.w_m, T=args.T, preType=args.preType)
	del cnet

	new_params = list(cnet_2.main_net.parameters()) + list(cnet_2.main_net.parameters()) if args.preType is not None else cnet_2.main_net.parameters()
	params = new_params if args.freeze else cnet_2.parameters()
	optimizer = optim.Adam(params, lr=args.lr/8*args.batchSize, weight_decay=args.weight_decay, betas=(0.9, 0.999))
	
	train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
	train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0) # shuffle needs to be false! it's done within the trainer

	val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views)
	val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
	print('num_train_files: '+str(len(train_dataset.filepaths)))
	print('num_val_files: '+str(len(val_dataset.filepaths)))
	trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'mvcnn', log_dir, num_views=args.num_views)
	trainer.train(30)
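
Example #4 optionally restricts training to cnet_2.main_net when --freeze is set and scales the learning rate linearly with the batch size (reference batch size 8). The sketch below shows one way to express that freezing pattern explicitly; the attribute names mirror the snippet, but this is an illustration rather than the project's code:

import torch.optim as optim

def build_optimizer(model, args):
    # Sketch: optionally train only model.main_net; everything else is frozen.
    if args.freeze:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.main_net.parameters():
            p.requires_grad = True
    params = [p for p in model.parameters() if p.requires_grad]
    lr = args.lr / 8 * args.batchSize   # linear LR scaling relative to batch size 8
    return optim.Adam(params, lr=lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))
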
Example #5
    #path='/home/yw/Desktop/mvcnn_pytorch-master_ECCV2018/mvcnn_self_attn_1and2stage_stage_1and2/'
    path = '/home/yw/Desktop/mvcnn_pytorch-master_ECCV2018/mvcnn_run2_stage_2/'
    modelfile = 'model-00020.pth'

    args = parser.parse_args()

    pretraining = not args.no_pretraining

    cnet = SVCNN(args.name,
                 nclasses=40,
                 pretraining=pretraining,
                 cnn_name=args.cnn_name)

    cnet_2 = MVCNN('mvcnn_run2',
                   cnet,
                   nclasses=40,
                   cnn_name=args.cnn_name,
                   num_views=args.num_views)
    del cnet
    cnet_2.cuda()

    cnet_2.load(path, modelfile)
    cnet_2.eval()

    n_models_train = args.num_models * args.num_views
    log_dir = None

    train_dataset = MultiviewImgDataset(args.train_path,
                                        scale_aug=False,
                                        rot_aug=False,
                                        num_models=n_models_train,
Example #6
                              log_dir,
                              num_views=1)
    tic1 = time.perf_counter()
    trainer.train(n_epochs=0)  # set to 1 for a quick test to check that both stages run end to end
    toc1 = time.perf_counter()
    print('Training time of the first stage: %d min' % ((toc1 - tic1) / 60))

    # STAGE 2
    print('###################Stage 2####################')
    log_dir = args.name + '_stage_2'
    create_folder(log_dir)

    # cnet_2 uses the same network as cnet
    cnet_2 = MVCNN(args.name,
                   cnet,
                   pool_mode='no',
                   nclasses=40,
                   cnn_name=args.cnn_name,
                   num_views=args.num_views)
    del cnet
    cnet_2 = torch.nn.DataParallel(cnet_2, device_ids=[0])  # 0,1
    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay,
                           betas=(0.9, 0.999))

    # batch_size: originally 8, reduced to 6 due to insufficient memory

    train_dataset = MultiviewImgDataset(args.train_path,
                                        scale_aug=False,
                                        rot_aug=False,
                                        num_models=n_models_train,
Example #7
    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'svcnn',
                              log_dir,
                              num_views=1)
    trainer.train(30)

    # STAGE 2
    log_dir = args.name + '_stage_2'
    create_folder(log_dir)
    cnet_2 = MVCNN(args.name,
                   cnet,
                   nclasses=44,
                   cnn_name=args.cnn_name,
                   num_views=args.num_views)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay,
                           betas=(0.9, 0.999))

    train_dataset = MultiviewImgDataset(args.train_path,
                                        scale_aug=False,
                                        rot_aug=False,
                                        num_models=n_models_train,
                                        num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(
Example #8
                                    scale_aug=False,
                                    rot_aug=False,
                                    test_mode=True,
                                    shuffle=False,
                                    num_views=args.num_views)
query_loader = torch.utils.data.DataLoader(query_dataset,
                                           batch_size=args.batchSize,
                                           shuffle=False,
                                           num_workers=0)
print('num_gallery_files:  ' + str(len(gallery_dataset.filepaths)))
print('num_query_files:  ' + str(len(query_dataset.filepaths)))

cnet = SVCNN(args.name, nclasses=40, pretraining=False, cnn_name=args.cnn_name)
cnet_2 = MVCNN(args.name,
               cnet,
               nclasses=40,
               cnn_name=args.cnn_name,
               num_views=args.num_views)
del cnet

model_parameters = torch.load('records/model_4.pth')
cnet_2 = torch.nn.DataParallel(cnet_2)
torch.backends.cudnn.benchmark = True
cnet_2.load_state_dict(model_parameters)

# query_features_list, query_labels_list = extract_features(cnet_2, query_loader)
# sio.savemat(log_dir + '/query.mat', {'features': query_features_list, 'labels': query_labels_list})

gallery_features_list, gallery_labels_list = extract_features(
    cnet_2, gallery_loader)
sio.savemat(log_dir + '/gallery.mat', {
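
Example #8 is cut off while saving the gallery features to a .mat file; the commented-out lines show that query features are handled the same way. Once both feature sets exist, retrieval typically ranks gallery items by cosine similarity to each query. A small illustrative helper, not part of the snippet's project:

import numpy as np

def rank_gallery(query_feats, gallery_feats):
    # For each query, return gallery indices sorted by descending cosine similarity.
    q = query_feats / np.linalg.norm(query_feats, axis=1, keepdims=True)
    g = gallery_feats / np.linalg.norm(gallery_feats, axis=1, keepdims=True)
    sims = q @ g.T                      # (num_query, num_gallery)
    return np.argsort(-sims, axis=1)    # best match first
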
Example #9
                    help="number of epochs",
                    default=30)

args = parser.parse_args()
log_dir = os.path.join(args.log_path, 'mvcnn/mvcnn_stage_2/')
savepath = os.path.join(log_dir, 'best_model.pth')

pretraining = not args.no_pretraining

cnet = SVCNN(args.name,
             nclasses=args.num_class,
             pretraining=pretraining,
             cnn_name=args.cnn_name)
cnet_2 = MVCNN(args.name,
               cnet,
               nclasses=args.num_class,
               cnn_name=args.cnn_name,
               num_views=args.num_views)
del cnet

val_dataset = MultiviewImgDataset(args.val_path,
                                  scale_aug=False,
                                  rot_aug=False,
                                  num_views=args.num_views)
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=args.batchSize,
                                         shuffle=False,
                                         num_workers=0)


def update_validation_accuracy(model, val_loader, log_dir, num_class=3):
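
The snippet ends at the signature of update_validation_accuracy. A minimal sketch of what such a validation loop might do, assuming each batch from val_loader is (labels, views) with views shaped (N, V, C, H, W) and the model pooling the flattened views back to one prediction per object (both assumptions, since the dataset layout is not shown here):

import torch

def validation_accuracy_sketch(model, val_loader):
    # Illustrative only: flatten the views, run the model, report overall accuracy.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for labels, views in val_loader:
            n, v, c, h, w = views.shape
            out = model(views.view(-1, c, h, w).cuda())
            pred = out.argmax(dim=1).cpu()
            correct += (pred == labels).sum().item()
            total += labels.numel()
    return correct / max(total, 1)
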
Example #10
    #                               num_views=1, nClasses=nclasses)
    # trainer.train(30, use_dataparallel)

    # STAGE 2
    ### ----------------------- Code needed only when starting from stage 2! -----------------------
    # if use_dataparallel == True:
    #     cnet.module.load_state_dict(torch.load("Vgg11_Seg_white_stage_1/Vgg11_Seg_white/model-00022.pth"))
    #     cnet.module.eval()
    # else:
    #     cnet.load_state_dict(torch.load("Vgg11_Seg_white_stage_1/Vgg11_Seg_white/model-00022.pth"))
    #     cnet.eval()
    ### ----------------------- Code needed only when starting from stage 2! -----------------------

    log_dir = args.name+'_stage_2'
    create_folder(log_dir)
    cnet_2 = MVCNN(args.name, cnet, nclasses=nclasses, cnn_name=args.cnn_name, num_views=args.num_views,KNU_data=args.KNU_Data,
                 use_encdec=args.use_encdec, encdec_name=args.encdec_name, encdim=args.encdim, use_dataparallel=use_dataparallel)
    del cnet

    ### ----------------------- Code needed when training stage 2 for 30 epochs or more! -----------------------
    # if use_dataparallel == True:
    #     cnet_2.module.load_state_dict(torch.load("Vgg11_Seg_white_stage_2/Vgg11_Seg_white/model-00027.pth"))
    #     cnet_2.module.eval()
    # else:
    #     cnet_2.load_state_dict(torch.load("Vgg11_Seg_white_stage_2/Vgg11_Seg_white/model-00027.pth"))
    #     cnet_2.eval()
    ### -------------------------------------------------------------------------------------------------------------

    if use_dataparallel:
        cnet_2 = nn.DataParallel(cnet_2)
        cnet_2.to(device)
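
The commented-out blocks in Example #10 load checkpoints differently depending on whether the model is wrapped in nn.DataParallel, because wrapping prefixes every state-dict key with 'module.'. A generic helper that copes with either case (a sketch, not part of this project):

import torch
import torch.nn as nn

def load_checkpoint(model, path):
    # Load a state dict whether or not it (or the model) was saved under nn.DataParallel.
    state = torch.load(path, map_location='cpu')
    target = model.module if isinstance(model, nn.DataParallel) else model
    state = {k[len('module.'):] if k.startswith('module.') else k: v
             for k, v in state.items()}
    target.load_state_dict(state)
    return model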