Example #1
def test(config):
    log_dir = os.path.join(config.log_dir, config.name + '_stage_2')

    val_path = os.path.join(config.data, "*/test")

    val_dataset = MultiviewImgDataset(val_path,
                                      scale_aug=False,
                                      rot_aug=False,
                                      num_views=config.num_views)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0)

    pretraining = not config.no_pretraining
    cnet = SVCNN(config.name,
                 nclasses=config.num_classes,
                 cnn_name=config.cnn_name,
                 pretraining=pretraining)

    cnet_2 = MVCNN(config.name,
                   cnet,
                   nclasses=config.num_classes,
                   cnn_name=config.cnn_name,
                   num_views=config.num_views)
    cnet_2.load(
        os.path.join(log_dir, config.snapshot_prefix + str(config.weights)))
    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay,
                           betas=(0.9, 0.999))

    trainer = ModelNetTrainer(cnet_2,
                              None,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=config.num_views)

    labels, predictions = trainer.update_validation_accuracy(config.weights,
                                                             test=True)
    import Evaluation_tools as et
    eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
    et.write_eval_file(config.data, eval_file, predictions, labels,
                       config.name)
    et.make_matrix(config.data, eval_file, config.log_dir)
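The test() function above assumes a config object exposing log_dir, name, data, num_views, num_classes, cnn_name, no_pretraining, snapshot_prefix, weights, learning_rate, weight_decay and stage2_batch_size. A minimal, hypothetical driver might look like the sketch below; the argument names and defaults mirror the attributes accessed above and are assumptions, not part of the original repository.

# Hypothetical sketch: build a config namespace and call test().
# Field names are inferred from the attribute accesses above; defaults are assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--name', default='mvcnn')
parser.add_argument('--data', default='modelnet40_images_new_12x')  # dataset root, joined with "*/test"
parser.add_argument('--log_dir', default='logs')
parser.add_argument('--num_views', type=int, default=12)
parser.add_argument('--num_classes', type=int, default=40)
parser.add_argument('--cnn_name', default='vgg11')
parser.add_argument('--no_pretraining', action='store_true')
parser.add_argument('--snapshot_prefix', default='model-')
parser.add_argument('--weights', type=int, default=30)  # which snapshot to load
parser.add_argument('--learning_rate', type=float, default=5e-5)
parser.add_argument('--weight_decay', type=float, default=0.0)
parser.add_argument('--stage2_batch_size', type=int, default=8)
config = parser.parse_args()

test(config)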
Example #2
    def stage1():
        log_dir = os.path.join(args.log_path, args.name, args.name+'_stage_1')
        create_folder(log_dir)

        optimizer = optim.Adam(cnet.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    

        train_dataset = SingleImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0)

        val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
        val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
        print('num_train_files: '+str(len(train_dataset.filepaths)))
        print('num_val_files: '+str(len(val_dataset.filepaths)))
        trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
        trainer.train(args.epoch)
Example #3
def train_3d_single():
    # STAGE 1
    print('Stage_1 begin:')

    log_dir = args.name + '_stage_1'
    create_folder(log_dir)

    svcnn = SVCNN(args.name,
                  nclasses=40,
                  pretraining=True,
                  cnn_name=args.cnn_name)
    optimizer = optim.Adam(svcnn.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)

    with open(args.single_train_path) as train_file:
        train_list = json.load(train_file)
    train_dataset = SingleImgDataset(train_list,
                                     scale_aug=False,
                                     rot_aug=False)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=64,
                                               shuffle=True,
                                               num_workers=0)

    with open(args.single_test_path) as test_file:
        test_list = json.load(test_file)
    val_dataset = SingleImgDataset(test_list,
                                   scale_aug=False,
                                   rot_aug=False,
                                   test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=64,
                                             shuffle=False,
                                             num_workers=0)
    print('num_train_files: ' + str(len(train_dataset.data_list)))
    print('num_val_files: ' + str(len(val_dataset.data_list)))
    trainer = ModelNetTrainer(svcnn,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'svcnn',
                              log_dir,
                              num_views=1)
    trainer.train(10)
    return svcnn
Example #4
def train_3d_multi(svcnn):
    print('Stage_2 begin:')
    log_dir = args.name + '_stage_2'
    create_folder(log_dir)
    gvcnn = GVCNN(args.name, svcnn, nclasses=40, num_views=args.num_views)
    del svcnn

    optimizer = optim.Adam(gvcnn.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay,
                           betas=(0.9, 0.999))

    with open(args.multi_train_path) as train_file:
        train_list = json.load(train_file)
    train_dataset = MultiviewImgDataset(train_list,
                                        scale_aug=False,
                                        rot_aug=False,
                                        num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batchSize,
                                               shuffle=True,
                                               num_workers=0)

    with open(args.multi_test_path) as test_file:
        test_list = json.load(test_file)
    val_dataset = MultiviewImgDataset(test_list,
                                      scale_aug=False,
                                      rot_aug=False,
                                      num_views=args.num_views)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batchSize,
                                             shuffle=False,
                                             num_workers=0)
    print('num_train_files: ' + str(len(train_dataset.data_list)))
    print('num_val_files: ' + str(len(val_dataset.data_list)))
    trainer = ModelNetTrainer(gvcnn,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'gvcnn',
                              log_dir,
                              num_views=args.num_views)
    trainer.train(15)
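Examples 3 and 4 form a two-stage pipeline: the SVCNN returned by train_3d_single() seeds the multi-view GVCNN in train_3d_multi(). A minimal sketch of chaining them, assuming args has already been parsed as in the snippets above:

# Hypothetical driver chaining the two stages shown above.
if __name__ == '__main__':
    svcnn = train_3d_single()   # stage 1: single-view training, returns the trained SVCNN
    train_3d_multi(svcnn)       # stage 2: multi-view GVCNN training built on top of it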
Example #5
    log_dir = args.name+'_stage_1'
    create_folder(log_dir)
    cnet = SVCNN(args.name, nclasses=40, pretraining=pretraining, cnn_name=args.cnn_name)

    optimizer = optim.Adam(cnet.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    
    n_models_train = args.num_models*args.num_views

    train_dataset = SingleImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0)

    val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
    trainer.train(30)

    # STAGE 2
    log_dir = args.name+'_stage_2'
    create_folder(log_dir)
    cnet_2 = MVCNN(args.name, cnet, nclasses=40, cnn_name=args.cnn_name, num_views=args.num_views)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))
    
    train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)  # shuffle needs to be False; it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views, test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
Example #6
                                                   shuffle=False,
                                                   num_workers=0)

        val_dataset = KmeanImgDataset(args.val_path)
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=args.batchSize,
                                                 shuffle=False,
                                                 num_workers=0)

        print('num_train_files: ' + str(len(train_dataset.filepaths)))
        print('num_val_files: ' + str(len(val_dataset.filepaths)))

        trainer = ModelNetTrainer(cnet_2,
                                  train_loader,
                                  val_loader,
                                  optimizer,
                                  nn.CrossEntropyLoss(),
                                  'mvcnn',
                                  log_dir,
                                  num_views=args.num_views)
        trainer.train_kmean_threeview(30)

    else:  # test
        path = '~/mvcnn_pytorch-master_ECCV2018_backup_2019_11_22/MVCNN_kmean_cat_no_sort_128/'
        modelfile = 'model-00002.pth'

        pretraining = not args.no_pretraining
        log_dir = args.name
        cnet = SVCNN(args.name,
                     nclasses=40,
                     pretraining=pretraining,
                     cnn_name=args.cnn_name)
Example #7
def train(config):
    log(config.log_file, 'Starting...')
    pretraining = not config.no_pretraining
    log_dir = config.name
    create_folder(config.name)

    log(config.log_file, '--------------stage 1--------------')
    # STAGE 1
    log_dir = os.path.join(config.log_dir, config.name + '_stage_1')
    create_folder(log_dir)
    cnet = SVCNN(config, pretraining=pretraining)

    optimizer = optim.Adam(cnet.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay)
    train_path = os.path.join(config.data, "*/train")
    train_dataset = SingleImgDataset(train_path,
                                     config,
                                     scale_aug=False,
                                     rot_aug=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.stage1_batch_size,
        shuffle=True,
        num_workers=0)

    val_path = os.path.join(config.data, "*/test")
    val_dataset = SingleImgDataset(val_path,
                                   config,
                                   scale_aug=False,
                                   rot_aug=False,
                                   test_mode=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage1_batch_size,
        shuffle=False,
        num_workers=0)

    log(config.log_file,
        'num_train_files: ' + str(len(train_dataset.filepaths)))
    log(config.log_file, 'num_val_files: ' + str(len(val_dataset.filepaths)))

    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=1)
    trainer.train(config, config.stage1_batch_size)
    #cnet.load(os.path.join(log_dir, config.snapshot_prefix + str(30)))

    # STAGE 2
    log(config.log_file, '--------------stage 2--------------')
    log_dir = os.path.join(config.log_dir, config.name + '_stage_2')
    create_folder(log_dir)
    cnet_2 = MVCNN(cnet, config)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay,
                           betas=(0.9, 0.999))

    train_dataset = MultiviewImgDataset(train_path,
                                        config,
                                        scale_aug=False,
                                        rot_aug=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0
    )  # shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(val_path,
                                      config,
                                      scale_aug=False,
                                      rot_aug=False)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0)
    log(config.log_file,
        'num_train_files: ' + str(len(train_dataset.filepaths)))
    log(config.log_file, 'num_val_files: ' + str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet_2,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=config.num_views)
    trainer.train(config, config.stage2_batch_size)
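The train(config) function above reads a wider set of config fields than the earlier test(config). A hedged summary of what it appears to expect, inferred only from the attribute accesses in the snippet (the repository's real argument parser may differ); SVCNN(config, ...) and MVCNN(cnet, config) likely read additional model fields such as cnn_name and num_classes from the same object.

# Hypothetical config sketch inferred from the attributes used by train(config) above;
# names and defaults are assumptions, not the repository's documented interface.
from types import SimpleNamespace

config = SimpleNamespace(
    name='mvcnn',
    data='modelnet40_images_new_12x',  # joined with "*/train" and "*/test"
    log_dir='logs',
    log_file='train.log',
    num_views=12,
    no_pretraining=False,
    learning_rate=5e-5,
    weight_decay=0.0,
    stage1_batch_size=64,
    stage2_batch_size=8,
    cnn_name='vgg11',   # assumed to be read inside SVCNN/MVCNN
    num_classes=40,     # assumed to be read inside SVCNN/MVCNN
)
train(config)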
Example #8
        val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
        val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
        print('num_train_files: '+str(len(train_dataset.filepaths)))
        print('num_val_files: '+str(len(val_dataset.filepaths)))
        trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
        trainer.train(args.epoch)

    if not args.skip_stage1:
        stage1()

    # STAGE 2
    log_dir = os.path.join(args.log_path, args.name, args.name+'_stage_2')
    create_folder(log_dir)
    cnet_2 = MVCNN(args.name, cnet, nclasses=args.num_class, cnn_name=args.cnn_name, num_views=args.num_views)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))
    
    train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views, num_class=args.num_class)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)  # shuffle needs to be False; it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views, num_class=args.num_class)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
    print('####stage_2####')
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'mvcnn', log_dir, num_views=args.num_views, num_class=args.num_class)
    trainer.train(args.epoch)


Example #9
                                   rot_aug=False,
                                   test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=48,
                                             shuffle=False,
                                             num_workers=10)

    # The training and test sets contain 9843 and 2468 models respectively, each multiplied by 12 views
    print('num_train_files: ' + str(len(train_dataset.filepaths)))
    print('num_val_files: ' + str(len(val_dataset.filepaths)))

    # This just defines a trainer that records data and reports loss/acc; 'svcnn' with num_views=1 means single-image input only
    trainer = ModelNetTrainer(cnet_,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'svcnn',
                              log_dir,
                              num_views=1)
    tic1 = time.perf_counter()  # time.clock() was removed in Python 3.8; use perf_counter() instead
    trainer.train(n_epochs=0)  # set to 1 when testing, to check that both stages run end to end
    toc1 = time.perf_counter()
    print('Training time of the first stage: %d m' % ((toc1 - tic1) / 60))

    # STAGE 2
    print('###################Stage 2####################')
    log_dir = args.name + '_stage_2'
    create_folder(log_dir)

    # cnet_2 uses the same backbone network as cnet
    cnet_2 = MVCNN(args.name,
Example #10
        val_dataset = KmeanImgDataset(args.val_path,
                                      fea_type=args.fea_type,
                                      dataset=dataset)
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=args.batchSize,
                                                 shuffle=False,
                                                 num_workers=0)

        print('num_train_files: ' + str(len(train_dataset.filepaths)))
        print('num_val_files: ' + str(len(val_dataset.filepaths)))

        trainer = ModelNetTrainer((nem, cnet_2),
                                  train_loader,
                                  val_loader,
                                  optimizer,
                                  nn.CrossEntropyLoss(),
                                  None,
                                  log_dir,
                                  num_views=args.num_views,
                                  class_num=class_num)
        trainer.train_nem_mvcnn(args.epoch)

    else:  # test
        path = '/mnt/cloud_disk/huangjj/exp_mvcnn/' + args.name
        modelfile = args.modelfile

        pretraining = not args.no_pretraining
        log_dir = args.name
        cnet = SVCNN(args.name,
                     nclasses=class_num,
                     pretraining=pretraining,
Example #11
    val_dataset = SingleImgDataset(args.val_path,
                                   scale_aug=False,
                                   rot_aug=False,
                                   test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=64,
                                             shuffle=False,
                                             num_workers=0)

    print('num_train_files: ' + str(len(train_dataset.filepaths)))
    print('num_val_files: ' + str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer_model,
                              optimizer_centerloss,
                              softmax_loss,
                              center_loss,
                              'svcnn',
                              log_dir,
                              num_views=1)
    trainer.train(20)

    # STAGE 2
    log_dir = 'smtcloss_wonor_' + args.name + '_stage_2_' + localtime
    create_folder(log_dir)
    cnet_2 = MVCNN(args.name,
                   cnet,
                   nclasses=40,
                   cnn_name=args.cnn_name,
                   num_views=args.num_views)
    del cnet
Example #12
    #     cnet_2.module.load_state_dict(torch.load("Vgg11_Seg_white_stage_2/Vgg11_Seg_white/model-00027.pth"))
    #     cnet_2.module.eval()
    # else:
    #     cnet_2.load_state_dict(torch.load("Vgg11_Seg_white_stage_2/Vgg11_Seg_white/model-00027.pth"))
    #     cnet_2.eval()
    ### -------------------------------------------------------------------------------------------------------------

    if use_dataparallel:
        cnet_2 = nn.DataParallel(cnet_2)
        cnet_2.to(device)

    optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))
    
    train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views, KNU_data=args.KNU_Data, pixel_augmentation=args.pixel_augmentation)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)  # shuffle needs to be False; it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views, KNU_data=args.KNU_Data, pixel_augmentation=args.pixel_augmentation)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))

    if args.loss_type == 'focal_loss':
        focal_loss = FocalLoss(gamma=2, alpha=0.25)
        trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, focal_loss, 'mvcnn', log_dir, num_views=args.num_views, nClasses=nclasses)
    else:
        trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'mvcnn', log_dir, num_views=args.num_views, nClasses=nclasses)

    trainer.train(30, use_dataparallel)
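Example 12 switches to a FocalLoss when args.loss_type == 'focal_loss'. The class itself is not shown; a minimal sketch of a multi-class focal loss with the same constructor signature (gamma, alpha), assuming the repository follows the standard formulation, could look like this.

# Minimal focal-loss sketch (standard formulation); the repo's own FocalLoss may differ.
import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    def __init__(self, gamma=2.0, alpha=0.25):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, logits, targets):
        ce = F.cross_entropy(logits, targets, reduction='none')  # -log p_t per sample
        pt = torch.exp(-ce)                                      # probability of the true class
        return (self.alpha * (1.0 - pt) ** self.gamma * ce).mean()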