Code Example #1
File: train.py Project: martinmCGG/diplomka
def test(config):
    log_dir = os.path.join(config.log_dir, config.name + '_stage_2')

    val_path = os.path.join(config.data, "*/test")

    val_dataset = MultiviewImgDataset(val_path,
                                      scale_aug=False,
                                      rot_aug=False,
                                      num_views=config.num_views)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0)

    pretraining = not config.no_pretraining
    cnet = SVCNN(config.name,
                 nclasses=config.num_classes,
                 cnn_name=config.cnn_name,
                 pretraining=pretraining)

    cnet_2 = MVCNN(config.name,
                   cnet,
                   nclasses=config.num_classes,
                   cnn_name=config.cnn_name,
                   num_views=config.num_views)
    cnet_2.load(
        os.path.join(log_dir, config.snapshot_prefix + str(config.weights)))
    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay,
                           betas=(0.9, 0.999))

    trainer = ModelNetTrainer(cnet_2,
                              None,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=config.num_views)

    labels, predictions = trainer.update_validation_accuracy(config.weights,
                                                             test=True)
    import Evaluation_tools as et
    eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
    et.write_eval_file(config.data, eval_file, predictions, labels,
                       config.name)
    et.make_matrix(config.data, eval_file, config.log_dir)
Code Example #2
File: train_mvcnn.py Project: hjjpku/multi_view_sort
    train_dataset = SingleImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0)

    val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
    trainer.train(30)

    # STAGE 2
    log_dir = args.name+'_stage_2'
    create_folder(log_dir)
    cnet_2 = MVCNN(args.name, cnet, nclasses=40, cnn_name=args.cnn_name, num_views=args.num_views)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))
    
    train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)  # shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views, test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'mvcnn', log_dir, num_views=args.num_views)
    trainer.train(30)


Code Example #3
File: train.py Project: Alobal123/diplomka
def train(config):
    log(config.log_file, 'Starting...')
    pretraining = not config.no_pretraining
    log_dir = config.name
    create_folder(config.name)

    log(config.log_file, '--------------stage 1--------------')
    # STAGE 1
    log_dir = os.path.join(config.log_dir, config.name + '_stage_1')
    create_folder(log_dir)
    cnet = SVCNN(config, pretraining=pretraining)

    optimizer = optim.Adam(cnet.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay)
    train_path = os.path.join(config.data, "*/train")
    train_dataset = SingleImgDataset(train_path,
                                     config,
                                     scale_aug=False,
                                     rot_aug=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.stage1_batch_size,
        shuffle=True,
        num_workers=0)

    val_path = os.path.join(config.data, "*/test")
    val_dataset = SingleImgDataset(val_path,
                                   config,
                                   scale_aug=False,
                                   rot_aug=False,
                                   test_mode=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage1_batch_size,
        shuffle=False,
        num_workers=0)

    log(config.log_file,
        'num_train_files: ' + str(len(train_dataset.filepaths)))
    log(config.log_file, 'num_val_files: ' + str(len(val_dataset.filepaths)))

    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=1)
    trainer.train(config, config.stage1_batch_size)
    #cnet.load(os.path.join(log_dir, config.snapshot_prefix + str(30)))

    # STAGE 2
    log(config.log_file, '--------------stage 2--------------')
    log_dir = os.path.join(config.log_dir, config.name + '_stage_2')
    create_folder(log_dir)
    cnet_2 = MVCNN(cnet, config)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay,
                           betas=(0.9, 0.999))

    train_dataset = MultiviewImgDataset(train_path,
                                        config,
                                        scale_aug=False,
                                        rot_aug=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0
    )  # shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(val_path,
                                      config,
                                      scale_aug=False,
                                      rot_aug=False)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0)
    log(config.log_file,
        'num_train_files: ' + str(len(train_dataset.filepaths)))
    log(config.log_file, 'num_val_files: ' + str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet_2,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=config.num_views)
    trainer.train(config, config.stage2_batch_size)
Code Example #4
    print('OA: ', val_overall_acc)
    print('AA: ', val_mean_class_acc)

    return features_list, labels_list


args = parser.parse_args()
print('###################Retrieval####################')
# create the retrieval output folder
log_dir = args.name + '_retrieval'
create_folder(log_dir)

gallery_dataset = MultiviewImgDataset(args.train_path,
                                      scale_aug=False,
                                      rot_aug=False,
                                      test_mode=True,
                                      shuffle=False,
                                      num_views=args.num_views)
gallery_loader = torch.utils.data.DataLoader(gallery_dataset,
                                             batch_size=args.batchSize,
                                             shuffle=False,
                                             num_workers=0)
query_dataset = MultiviewImgDataset(args.val_path,
                                    scale_aug=False,
                                    rot_aug=False,
                                    test_mode=True,
                                    shuffle=False,
                                    num_views=args.num_views)
query_loader = torch.utils.data.DataLoader(query_dataset,
                                           batch_size=args.batchSize,
                                           shuffle=False,
Code Example #5
File: test.py Project: sngver/mvcnn_pytorch
pretraining = not args.no_pretraining

cnet = SVCNN(args.name,
             nclasses=args.num_class,
             pretraining=pretraining,
             cnn_name=args.cnn_name)
cnet_2 = MVCNN(args.name,
               cnet,
               nclasses=args.num_class,
               cnn_name=args.cnn_name,
               num_views=args.num_views)
del cnet

val_dataset = MultiviewImgDataset(args.val_path,
                                  scale_aug=False,
                                  rot_aug=False,
                                  num_views=args.num_views)
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=args.batchSize,
                                         shuffle=False,
                                         num_workers=0)


def update_validation_accuracy(model, val_loader, log_dir, num_class=3):
    all_correct_points = 0
    all_points = 0

    DUMP_DIR = os.path.join(log_dir, 'dump')
    if not os.path.exists(DUMP_DIR):
        os.mkdir(DUMP_DIR)

    # in_data = None
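
The body of update_validation_accuracy is cut off in the snippet above. The following is a minimal sketch of how such a multi-view validation loop is typically completed in mvcnn_pytorch-style code; the (N, V, C, H, W) reshape, the per-class bookkeeping arrays (samples_class, wrong_class), and the returned accuracy pair are illustrative assumptions, not code recovered from the sngver/mvcnn_pytorch project.

import os
import numpy as np
import torch

def update_validation_accuracy(model, val_loader, log_dir, num_class=3):
    # Sketch only: accumulates overall and per-class accuracy over the
    # multi-view validation loader (assumes a CUDA-capable setup, as in
    # the surrounding snippets).
    all_correct_points = 0
    all_points = 0
    wrong_class = np.zeros(num_class)
    samples_class = np.zeros(num_class)

    DUMP_DIR = os.path.join(log_dir, 'dump')
    if not os.path.exists(DUMP_DIR):
        os.mkdir(DUMP_DIR)

    model.eval()
    with torch.no_grad():
        for data in val_loader:
            # data[0]: labels (N,), data[1]: view images (N, V, C, H, W)
            N, V, C, H, W = data[1].size()
            in_data = data[1].view(-1, C, H, W).cuda()
            target = data[0].cuda()

            out_data = model(in_data)          # (N, nclasses)
            pred = torch.max(out_data, 1)[1]   # predicted class per object

            results = pred == target
            for i in range(results.size(0)):
                samples_class[target[i].item()] += 1
                if not results[i].item():
                    wrong_class[target[i].item()] += 1
            all_correct_points += results.sum().item()
            all_points += results.size(0)

    overall_acc = all_correct_points / all_points
    mean_class_acc = np.mean((samples_class - wrong_class) / samples_class)
    return overall_acc, mean_class_acc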
Code Example #6
    print('OA: ', val_overall_acc)
    print('AA: ', val_mean_class_acc)

    return features_list, labels_list


args = parser.parse_args()
print('###################Retrieval####################')
# create the retrieval output folder
log_dir = args.name + '_retrieval'
create_folder(log_dir)

sketch_dataset = MultiviewImgDataset(args.sketch_path,
                                     scale_aug=False,
                                     rot_aug=False,
                                     test_mode=True,
                                     shuffle=False,
                                     num_views=args.num_views)
sketch_loader = torch.utils.data.DataLoader(sketch_dataset,
                                            batch_size=args.batchSize,
                                            shuffle=False,
                                            num_workers=0)
print('num_gallery_files:  ' + str(len(sketch_dataset.filepaths)))

cnet = SVCNN(args.name, nclasses=40, pretraining=False, cnn_name=args.cnn_name)
cnet_2 = MVCNN(args.name,
               cnet,
               nclasses=40,
               cnn_name=args.cnn_name,
               num_views=args.num_views)
del cnet
Code Example #7
    ### ----------------------- Code needed when training stage 2 beyond 30 epochs! -----------------------
    # if use_dataparallel == True:
    #     cnet_2.module.load_state_dict(torch.load("Vgg11_Seg_white_stage_2/Vgg11_Seg_white/model-00027.pth"))
    #     cnet_2.module.eval()
    # else:
    #     cnet_2.load_state_dict(torch.load("Vgg11_Seg_white_stage_2/Vgg11_Seg_white/model-00027.pth"))
    #     cnet_2.eval()
    ### -------------------------------------------------------------------------------------------------------------

    if use_dataparallel:
        cnet_2 = nn.DataParallel(cnet_2)
        cnet_2.to(device)

    optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))
    
    train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views, KNU_data=args.KNU_Data, pixel_augmentation=args.pixel_augmentation)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)  # shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views, KNU_data=args.KNU_Data, pixel_augmentation=args.pixel_augmentation)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))

    if args.loss_type == 'focal_loss':
        focal_loss = FocalLoss(gamma=2, alpha=0.25)
        trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, focal_loss, 'mvcnn', log_dir, num_views=args.num_views, nClasses=nclasses)
    else:
        trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'mvcnn', log_dir, num_views=args.num_views, nClasses=nclasses)

    trainer.train(30, use_dataparallel)
Code Example #8
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    
    trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
    trainer.train(args.epoch)

    # STAGE 2
    log_dir = args.name+'_stage_2'
    create_folder(log_dir)
    cnet_2 = MVCNN(args.name, cnet, nclasses=len(classnames), cnn_name=args.cnn_name, num_views=args.num_views)
    
    #if torch.cuda.device_count()>1:
    #    print('Use',torch.cuda.device_count(),'GPUs')
    #    cnet_2 = nn.DataParallel(cnet_2)

    del cnet

    optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))
    
    train_dataset = MultiviewImgDataset(args.train_path, classnames, objs, scale_aug=False, rot_aug=False, num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)  # shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path, classnames, objs, scale_aug=False, rot_aug=False, num_views=args.num_views)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'mvcnn', log_dir, num_views=args.num_views)
    trainer.train(args.epoch)


Code Example #9
File: classifier.py Project: luixiao1223/view-GCN
if __name__ == '__main__':

    classnames = ['airplane', 'bathtub', 'bed', 'bench', 'bookshelf', 'bottle', 'bowl', 'car', 'chair',
                  'cone', 'cup', 'curtain', 'desk', 'door', 'dresser', 'flower_pot', 'glass_box',
                  'guitar', 'keyboard', 'lamp', 'laptop', 'mantel', 'monitor', 'night_stand',
                  'person', 'piano', 'plant', 'radio', 'range_hood', 'sink', 'sofa', 'stairs',
                  'stool', 'table', 'tent', 'toilet', 'tv_stand', 'vase', 'wardrobe', 'xbox']

    cnet = SVCNN('mvcnn', nclasses=40, pretraining=False, cnn_name='resnet18')
    vgcnn = view_GCN('mvcnn', cnet, nclasses=40, cnn_name='resnet18', num_views=20)

    vgcnn.eval()
    vgcnn.load("./view-gcn", "trained_view_gcn.pth")

    val_dataset = MultiviewImgDataset('data/modelnet40v2png_ori4/*/test', scale_aug=False, rot_aug=False, num_views=20, test_mode=False)

    Counter = 0
    index = 0
    for _, data in enumerate(val_dataset, 0):
        V, C, H, W = data[1].size()
        in_data = Variable(data[1]).view(-1, C, H, W).cpu()
        target = data[0]
        out_data, F1, F2 = vgcnn(in_data)
        pred = torch.max(out_data, 1)[1]
        pred = pred[0].cpu().detach().numpy().tolist()
        if pred == target:
            Counter += 1
        index += 1
        print(Counter, index, Counter/len(val_dataset))
    print("total acc:", Counter/len(val_dataset))