示例#1
0
    def stage1():
        # Stage 1: single-view training of the shared CNN backbone (cnet).
        stage_dir = os.path.join(args.log_path, args.name, args.name+'_stage_1')
        create_folder(stage_dir)

        adam = optim.Adam(cnet.parameters(), lr=args.lr, weight_decay=args.weight_decay)

        # Per-image datasets; augmentation disabled for both splits.
        ds_train = SingleImgDataset(
            args.train_path,
            scale_aug=False,
            rot_aug=False,
            num_models=n_models_train,
            num_views=args.num_views,
        )
        loader_train = torch.utils.data.DataLoader(
            ds_train, batch_size=64, shuffle=True, num_workers=0)

        ds_val = SingleImgDataset(
            args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
        loader_val = torch.utils.data.DataLoader(
            ds_val, batch_size=64, shuffle=False, num_workers=0)

        print('num_train_files: ' + str(len(ds_train.filepaths)))
        print('num_val_files: ' + str(len(ds_val.filepaths)))

        trainer = ModelNetTrainer(
            cnet, loader_train, loader_val, adam,
            nn.CrossEntropyLoss(), 'svcnn', stage_dir, num_views=1)
        trainer.train(args.epoch)
示例#2
0
def train_3d_single():
    """Stage 1: train the single-view network (SVCNN) on individual images.

    Builds an SVCNN with pretraining enabled, loads the single-image
    train/test splits from the JSON files named in ``args``, and runs
    ModelNetTrainer for 10 epochs.

    Returns:
        The trained SVCNN instance (consumed by the multi-view stage).
    """
    # STAGE 1
    print('Stage_1 begin:')

    log_dir = args.name + '_stage_1'
    create_folder(log_dir)

    svcnn = SVCNN(args.name,
                  nclasses=40,
                  pretraining=True,
                  cnn_name=args.cnn_name)
    optimizer = optim.Adam(svcnn.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)

    # Context managers close the split files promptly; the original
    # left both file handles open for the lifetime of the process.
    with open(args.single_train_path) as train_file:
        train_list = json.load(train_file)
    train_dataset = SingleImgDataset(train_list,
                                     scale_aug=False,
                                     rot_aug=False)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=64,
                                               shuffle=True,
                                               num_workers=0)

    with open(args.single_test_path) as test_file:
        test_list = json.load(test_file)
    val_dataset = SingleImgDataset(test_list,
                                   scale_aug=False,
                                   rot_aug=False,
                                   test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=64,
                                             shuffle=False,
                                             num_workers=0)
    print('num_train_files: ' + str(len(train_dataset.data_list)))
    print('num_val_files: ' + str(len(val_dataset.data_list)))
    trainer = ModelNetTrainer(svcnn,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'svcnn',
                              log_dir,
                              num_views=1)
    trainer.train(10)
    return svcnn
示例#3
0
def train_3d_multi(svcnn):
    """Stage 2: train the grouped multi-view network (GVCNN).

    Wraps the stage-1 SVCNN backbone in a GVCNN, loads the multi-view
    train/test splits from the JSON files named in ``args``, and runs
    ModelNetTrainer for 15 epochs.

    Args:
        svcnn: trained single-view model whose backbone GVCNN reuses.
    """
    print('Stage_2 begin:')
    log_dir = args.name + '_stage_2'
    create_folder(log_dir)
    gvcnn = GVCNN(args.name, svcnn, nclasses=40, num_views=args.num_views)
    # The wrapper is no longer needed once GVCNN has been constructed.
    del svcnn

    optimizer = optim.Adam(gvcnn.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay,
                           betas=(0.9, 0.999))

    # Context managers close the split files promptly; the original
    # left both file handles open for the lifetime of the process.
    with open(args.multi_train_path) as train_file:
        train_list = json.load(train_file)
    train_dataset = MultiviewImgDataset(train_list,
                                        scale_aug=False,
                                        rot_aug=False,
                                        num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batchSize,
                                               shuffle=True,
                                               num_workers=0)

    with open(args.multi_test_path) as test_file:
        test_list = json.load(test_file)
    val_dataset = MultiviewImgDataset(test_list,
                                      scale_aug=False,
                                      rot_aug=False,
                                      num_views=args.num_views)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batchSize,
                                             shuffle=False,
                                             num_workers=0)
    print('num_train_files: ' + str(len(train_dataset.data_list)))
    print('num_val_files: ' + str(len(val_dataset.data_list)))
    trainer = ModelNetTrainer(gvcnn,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'gvcnn',
                              log_dir,
                              num_views=args.num_views)
    trainer.train(15)
示例#4
0
    create_folder(log_dir)
    # Stage-1 model: classifies a single rendered view per sample.
    cnet = SVCNN(args.name, nclasses=40, pretraining=pretraining, cnn_name=args.cnn_name)

    optimizer = optim.Adam(cnet.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # Per-view sample budget passed to the dataset — presumably caps how many
    # files SingleImgDataset keeps; confirm against the dataset implementation.
    n_models_train = args.num_models*args.num_views

    train_dataset = SingleImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0)

    # Validation split: deterministic order, no augmentation.
    val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
    trainer.train(30)

    # STAGE 2
    log_dir = args.name+'_stage_2'
    create_folder(log_dir)
    # Multi-view model built around the stage-1 backbone.
    cnet_2 = MVCNN(args.name, cnet, nclasses=40, cnn_name=args.cnn_name, num_views=args.num_views)
    # Presumably MVCNN absorbs cnet's backbone; drop the stage-1 wrapper.
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))

    train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)# shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views,test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
示例#5
0
def train(config):
    """Two-stage MVCNN training driven by a single `config` object.

    Stage 1 trains the single-view SVCNN on per-image samples; stage 2
    wraps that backbone in an MVCNN and trains on multi-view batches.
    Progress messages go to `config.log_file` via `log()`.
    """
    log(config.log_file, 'Starting...')
    pretraining = not config.no_pretraining
    log_dir = config.name
    create_folder(config.name)

    log(config.log_file, '--------------stage 1--------------')
    # STAGE 1
    log_dir = os.path.join(config.log_dir, config.name + '_stage_1')
    create_folder(log_dir)
    cnet = SVCNN(config, pretraining=pretraining)

    optimizer = optim.Adam(cnet.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay)
    # Wildcard path pattern — presumably expanded by the dataset into one
    # train folder per class under config.data; verify in SingleImgDataset.
    train_path = os.path.join(config.data, "*/train")
    train_dataset = SingleImgDataset(train_path,
                                     config,
                                     scale_aug=False,
                                     rot_aug=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.stage1_batch_size,
        shuffle=True,
        num_workers=0)

    val_path = os.path.join(config.data, "*/test")
    val_dataset = SingleImgDataset(val_path,
                                   config,
                                   scale_aug=False,
                                   rot_aug=False,
                                   test_mode=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage1_batch_size,
        shuffle=False,
        num_workers=0)

    log(config.log_file,
        'num_train_files: ' + str(len(train_dataset.filepaths)))
    log(config.log_file, 'num_val_files: ' + str(len(val_dataset.filepaths)))

    # num_views=1: stage 1 consumes one image at a time.
    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=1)
    trainer.train(config, config.stage1_batch_size)
    #cnet.load(os.path.join(log_dir, config.snapshot_prefix + str(30)))

    # STAGE 2
    log(config.log_file, '--------------stage 2--------------')
    log_dir = os.path.join(config.log_dir, config.name + '_stage_2')
    create_folder(log_dir)
    # Presumably MVCNN absorbs cnet's backbone; the wrapper is then released.
    cnet_2 = MVCNN(cnet, config)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay,
                           betas=(0.9, 0.999))

    train_dataset = MultiviewImgDataset(train_path,
                                        config,
                                        scale_aug=False,
                                        rot_aug=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0
    )  # shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(val_path,
                                      config,
                                      scale_aug=False,
                                      rot_aug=False)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0)
    log(config.log_file,
        'num_train_files: ' + str(len(train_dataset.filepaths)))
    log(config.log_file, 'num_val_files: ' + str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet_2,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=config.num_views)
    trainer.train(config, config.stage2_batch_size)
示例#6
0
        # Validation split: deterministic order, no augmentation.
        val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
        val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
        print('num_train_files: '+str(len(train_dataset.filepaths)))
        print('num_val_files: '+str(len(val_dataset.filepaths)))
        trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
        trainer.train(args.epoch)

    # Stage 1 is optional — presumably skipped when cnet already has
    # trained weights; confirm how cnet is initialized in that case.
    if not args.skip_stage1:
        stage1()

    # STAGE 2
    log_dir = os.path.join(args.log_path, args.name, args.name+'_stage_2')
    create_folder(log_dir)
    # Multi-view model built around the stage-1 backbone.
    cnet_2 = MVCNN(args.name, cnet, nclasses=args.num_class, cnn_name=args.cnn_name, num_views=args.num_views)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))

    train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views, num_class=args.num_class)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)# shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views, num_class=args.num_class)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
    print('####stage_2####')
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'mvcnn', log_dir, num_views=args.num_views, num_class=args.num_class)
    trainer.train(args.epoch)

示例#7
0
    # Train/test sets hold 9843 and 2468 models respectively, times 12 views.
    print('num_train_files: ' + str(len(train_dataset.filepaths)))
    print('num_val_files: ' + str(len(val_dataset.filepaths)))

    # The trainer only wires up logging and loss/accuracy reporting here;
    # 'svcnn' with num_views=1 means a single image per input.
    trainer = ModelNetTrainer(cnet_,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'svcnn',
                              log_dir,
                              num_views=1)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for measuring elapsed intervals.
    tic1 = time.perf_counter()
    trainer.train(n_epochs=0)  # set to 1 when smoke-testing both stages
    toc1 = time.perf_counter()
    print('The training time of first stage: %d m' % ((toc1 - tic1) / 60))

    # STAGE 2
    print('###################Stage 2####################')
    log_dir = args.name + '_stage_2'
    create_folder(log_dir)

    # cnet_2 uses the same backbone network as cnet
    cnet_2 = MVCNN(args.name,
                   cnet,
                   pool_mode='no',
                   nclasses=40,
                   cnn_name=args.cnn_name,
                   num_views=args.num_views)
示例#8
0
                                   test_mode=True)
    # Large eval batch (400) with no shuffling for deterministic validation.
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=400,
                                             shuffle=False,
                                             num_workers=4)
    print('num_train_files: ' + str(len(train_dataset.filepaths)))
    print('num_val_files: ' + str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'svcnn',
                              log_dir,
                              num_views=1)
    trainer.train(15)

    # # # STAGE 2
    log_dir = args.name + '_stage_2'
    create_folder(log_dir)
    # Stage-2 model: view-GCN built on top of the stage-1 cnet backbone.
    cnet_2 = view_GCN(args.name,
                      cnet,
                      nclasses=40,
                      cnn_name=args.cnn_name,
                      num_views=args.num_views)
    # Stage 2 switches from Adam to SGD with momentum.
    optimizer = optim.SGD(cnet_2.parameters(),
                          lr=args.lr,
                          weight_decay=args.weight_decay,
                          momentum=0.9)
    train_dataset = MultiviewImgDataset(args.train_path,
                                        scale_aug=False,
示例#9
0
                                   test_mode=True)
    # Large eval batch (400) with no shuffling for deterministic validation.
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=400,
                                             shuffle=False,
                                             num_workers=4)
    print('num_train_files: ' + str(len(train_dataset.filepaths)))
    print('num_val_files: ' + str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'svcnn',
                              log_dir,
                              num_views=1)
    trainer.train(30)

    # # # STAGE 2
    log_dir = args.name + '_stage_2'
    create_folder(log_dir)
    # Stage-2 model: view-GCN built on top of the stage-1 cnet backbone.
    cnet_2 = view_GCN(args.name,
                      cnet,
                      nclasses=40,
                      cnn_name=args.cnn_name,
                      num_views=args.num_views)
    # Stage 2 switches from Adam to SGD with momentum.
    optimizer = optim.SGD(cnet_2.parameters(),
                          lr=args.lr,
                          weight_decay=args.weight_decay,
                          momentum=0.9)
    train_dataset = MultiviewImgDataset(args.train_path,
                                        scale_aug=False,
示例#10
0
                                   test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=32,
                                             shuffle=False,
                                             num_workers=0)
    print('num_train_files: ' + str(len(train_dataset.filepaths)))
    print('num_val_files: ' + str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'svcnn',
                              log_dir,
                              num_views=1)
    # Single epoch only — presumably a quick stage-1 pass; confirm intent.
    trainer.train(1)

    # STAGE 2
    log_dir = args.name + '_stage_2'
    create_folder(log_dir)
    # Multi-view model built around the stage-1 backbone.
    cnet_2 = MVCNN(args.name,
                   cnet,
                   nclasses=args.num_classes,
                   cnn_name=args.cnn_name,
                   num_views=args.num_views)
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay,
                           betas=(0.9, 0.999))
示例#11
0
                                             shuffle=False,
                                             num_workers=0)

    print('num_train_files: ' + str(len(train_dataset.filepaths)))
    print('num_val_files: ' + str(len(val_dataset.filepaths)))
    # This ModelNetTrainer variant takes two optimizers and two criteria:
    # one pair for the model/softmax loss, one pair for the center loss.
    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer_model,
                              optimizer_centerloss,
                              softmax_loss,
                              center_loss,
                              'svcnn',
                              log_dir,
                              num_views=1)
    trainer.train(20)

    # STAGE 2
    log_dir = 'smtcloss_wonor_' + args.name + '_stage_2_' + localtime
    create_folder(log_dir)
    # Multi-view model built around the stage-1 backbone.
    cnet_2 = MVCNN(args.name,
                   cnet,
                   nclasses=40,
                   cnn_name=args.cnn_name,
                   num_views=args.num_views)
    del cnet

    # Stage 2 switches to plain SGD (no momentum set here).
    optimizer = optim.SGD(cnet_2.parameters(),
                          lr=args.lr,
                          weight_decay=args.weight_decay)
示例#12
0
    #     cnet_2.module.load_state_dict(torch.load("Vgg11_Seg_white_stage_2/Vgg11_Seg_white/model-00027.pth"))
    #     cnet_2.module.eval()
    # else:
    #     cnet_2.load_state_dict(torch.load("Vgg11_Seg_white_stage_2/Vgg11_Seg_white/model-00027.pth"))
    #     cnet_2.eval()
    ### -------------------------------------------------------------------------------------------------------------

    # Optional multi-GPU wrapping; the model is moved to `device` afterwards.
    if use_dataparallel:
        cnet_2 = nn.DataParallel(cnet_2)
        cnet_2.to(device)

    optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))

    train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views,KNU_data=args.KNU_Data,pixel_augmentation=args.pixel_augmentation)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)# shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views,KNU_data=args.KNU_Data,pixel_augmentation=args.pixel_augmentation)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))

    # Loss is selectable: focal loss (gamma=2, alpha=0.25) or cross-entropy.
    if(args.loss_type == 'focal_loss'):
        focal_loss = FocalLoss(gamma=2, alpha=0.25)
        trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, focal_loss, 'mvcnn', log_dir, num_views=args.num_views, nClasses=nclasses)
    else:
        trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'mvcnn', log_dir, num_views=args.num_views, nClasses=nclasses)

    trainer.train(30, use_dataparallel)

示例#13
0
    # Multi-view training data; order must stay fixed because shuffling is
    # handled inside the trainer (see the loader comment below).
    train_dataset = MultiviewImgDataset(args.train_path,
                                        scale_aug=False,
                                        rot_aug=False,
                                        num_models=n_models_train,
                                        num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0
    )  # shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(args.val_path,
                                      scale_aug=False,
                                      rot_aug=False,
                                      num_views=args.num_views,
                                      test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batchSize,
                                             shuffle=False,
                                             num_workers=0)
    print('num_train_files: ' + str(len(train_dataset.filepaths)))
    print('num_val_files: ' + str(len(val_dataset.filepaths)))
    trainer = ModelNetTrainer(cnet_2,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              'mvcnn',
                              log_dir,
                              num_views=args.num_views)
    trainer.train(60)