# ---- Example 1 (score: 0) ----
def test(config):
    """Evaluate a trained stage-2 MVCNN on the test split.

    Loads the stage-2 snapshot selected by ``config.weights``, runs the
    test set through ``ModelNetTrainer.update_validation_accuracy`` and
    writes an evaluation file plus a confusion matrix via
    ``Evaluation_tools``.

    Args:
        config: parsed configuration namespace. Must provide ``log_dir``,
            ``name``, ``data``, ``num_views``, ``stage2_batch_size``,
            ``no_pretraining``, ``num_classes``, ``cnn_name``,
            ``learning_rate``, ``weight_decay``, ``snapshot_prefix`` and
            ``weights``.
    """
    log_dir = os.path.join(config.log_dir, f'{config.name}_stage_2')

    # Multi-view test set: each sample groups `num_views` rendered images.
    val_path = os.path.join(config.data, "*/test")
    val_dataset = MultiviewImgDataset(val_path,
                                      scale_aug=False,
                                      rot_aug=False,
                                      num_views=config.num_views)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0)

    pretraining = not config.no_pretraining
    # Single-view backbone, wrapped below by the multi-view network.
    cnet = SVCNN(config.name,
                 nclasses=config.num_classes,
                 cnn_name=config.cnn_name,
                 pretraining=pretraining)
    cnet_2 = MVCNN(config.name,
                   cnet,
                   nclasses=config.num_classes,
                   cnn_name=config.cnn_name,
                   num_views=config.num_views)
    # Restore the stage-2 snapshot chosen by config.weights.
    cnet_2.load(
        os.path.join(log_dir, f'{config.snapshot_prefix}{config.weights}'))

    # The trainer's constructor requires an optimizer even though no
    # optimization step happens during evaluation.
    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay,
                           betas=(0.9, 0.999))

    trainer = ModelNetTrainer(cnet_2,
                              None,  # no train loader: evaluation only
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=config.num_views)

    labels, predictions = trainer.update_validation_accuracy(config.weights,
                                                             test=True)

    # Local import: Evaluation_tools is only needed for report generation.
    import Evaluation_tools as et
    eval_file = os.path.join(config.log_dir, f'{config.name}.txt')
    et.write_eval_file(config.data, eval_file, predictions, labels,
                       config.name)
    et.make_matrix(config.data, eval_file, config.log_dir)
# ---- Example 2 (score: 0) ----
    # NOTE(review): fragment — the enclosing function/`__main__` header is
    # outside this chunk.
    # Fix RNG seeds so CUDA and NumPy runs are reproducible.
    torch.cuda.manual_seed_all(10)
    np.random.seed(10)

    args = parser.parse_args()

    pretraining = not args.no_pretraining
    log_dir = args.name
    create_folder(args.name)
    # Persist the parsed arguments so the run can be reproduced later.
    # NOTE(review): file opened without a `with` block; closed manually below.
    config_f = open(os.path.join(log_dir, 'config.json'), 'w')
    json.dump(vars(args), config_f)
    config_f.close()

    # STAGE 1: train the single-view CNN (SVCNN) on individual images.
    log_dir = args.name+'_stage_1'
    create_folder(log_dir)
    cnet = SVCNN(args.name, nclasses=40, pretraining=pretraining, cnn_name=args.cnn_name)

    optimizer = optim.Adam(cnet.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    
    # Total number of single-view training images (models * views per model).
    n_models_train = args.num_models*args.num_views

    train_dataset = SingleImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0)

    val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
    print('num_train_files: '+str(len(train_dataset.filepaths)))
    print('num_val_files: '+str(len(val_dataset.filepaths)))
    # Train the single-view network for 30 epochs.
    trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
    trainer.train(30)
# ---- Example 3 (score: 0) ----
    # NOTE(review): fragment — the enclosing function/`__main__` header is
    # outside this chunk, and the code continues past its end.
    args = parser.parse_args()

    if True:  # train  (always-on guard; presumably a debug toggle — verify)

        pretraining = not args.no_pretraining
        log_dir = args.name
        create_folder(args.name)
        # Persist the parsed arguments for later reproduction of the run.
        config_f = open(os.path.join(log_dir, 'config.json'), 'w')
        json.dump(vars(args), config_f)
        config_f.close()

        # STAGE 1

        # Single-view backbone, handed to the three-view attention model.
        cnet = SVCNN(args.name,
                     nclasses=40,
                     pretraining=pretraining,
                     cnn_name=args.cnn_name)

        create_folder(log_dir)
        cnet_2 = ThreeView_att_sort(cnet, nclasses=40)
        # The wrapper keeps its own reference to the backbone.
        del cnet

        optimizer = optim.Adam(cnet_2.parameters(),
                               lr=args.lr,
                               weight_decay=args.weight_decay,
                               betas=(0.9, 0.999))
        train_dataset = KmeanImgDataset(args.train_path)
        # NOTE(review): shuffle=False — presumably ordering matters to the
        # k-means grouped dataset or is handled elsewhere; confirm.
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batchSize,
                                                   shuffle=False,
                                                   num_workers=0)
# ---- Example 4 (score: 0) ----
def train(config):
    """Run the two-stage MVCNN training pipeline.

    Stage 1 trains the single-view CNN (SVCNN) on individual rendered
    images; stage 2 wraps it in MVCNN and fine-tunes on multi-view
    samples. Progress messages go to ``config.log_file`` and per-stage
    artifacts are written under ``config.log_dir``.

    Args:
        config: parsed configuration namespace. Must provide ``log_file``,
            ``no_pretraining``, ``name``, ``log_dir``, ``learning_rate``,
            ``weight_decay``, ``data``, ``stage1_batch_size``,
            ``stage2_batch_size`` and ``num_views``.
    """
    log(config.log_file, 'Starting...')
    pretraining = not config.no_pretraining
    log_dir = config.name
    create_folder(config.name)

    log(config.log_file, '--------------stage 1--------------')
    # STAGE 1: single-view training.
    log_dir = os.path.join(config.log_dir, f'{config.name}_stage_1')
    create_folder(log_dir)
    cnet = SVCNN(config, pretraining=pretraining)

    optimizer = optim.Adam(cnet.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay)

    train_path = os.path.join(config.data, "*/train")
    train_dataset = SingleImgDataset(train_path,
                                     config,
                                     scale_aug=False,
                                     rot_aug=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.stage1_batch_size,
        shuffle=True,
        num_workers=0)

    val_path = os.path.join(config.data, "*/test")
    val_dataset = SingleImgDataset(val_path,
                                   config,
                                   scale_aug=False,
                                   rot_aug=False,
                                   test_mode=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage1_batch_size,
        shuffle=False,
        num_workers=0)

    log(config.log_file, f'num_train_files: {len(train_dataset.filepaths)}')
    log(config.log_file, f'num_val_files: {len(val_dataset.filepaths)}')

    trainer = ModelNetTrainer(cnet,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=1)
    trainer.train(config, config.stage1_batch_size)

    # STAGE 2: multi-view fine-tuning of the stage-1 backbone.
    log(config.log_file, '--------------stage 2--------------')
    log_dir = os.path.join(config.log_dir, f'{config.name}_stage_2')
    create_folder(log_dir)
    cnet_2 = MVCNN(cnet, config)
    # MVCNN keeps its own reference to the backbone; drop the local name.
    del cnet

    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay,
                           betas=(0.9, 0.999))

    train_dataset = MultiviewImgDataset(train_path,
                                        config,
                                        scale_aug=False,
                                        rot_aug=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0
    )  # shuffle needs to be false! it's done within the trainer

    val_dataset = MultiviewImgDataset(val_path,
                                      config,
                                      scale_aug=False,
                                      rot_aug=False)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0)

    log(config.log_file, f'num_train_files: {len(train_dataset.filepaths)}')
    log(config.log_file, f'num_val_files: {len(val_dataset.filepaths)}')
    trainer = ModelNetTrainer(cnet_2,
                              train_loader,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=config.num_views)
    trainer.train(config, config.stage2_batch_size)
# ---- Example 5 (score: 0) ----
    # NOTE(review): the lines above `if __name__` belong to a definition
    # whose header is outside this chunk.
    # make summary folder
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

if __name__ == '__main__':
    args = parser.parse_args()

    pretraining = not args.no_pretraining
    log_dir = os.path.join(args.log_path, args.name)
    create_folder(log_dir)
    # Persist the parsed arguments so the run can be reproduced later.
    # NOTE(review): file opened without a `with` block; closed manually below.
    config_f = open(os.path.join(log_dir, 'config.json'), 'w')
    json.dump(vars(args), config_f)
    config_f.close()

    # STAGE 1: single-view backbone shared by the stage functions.
    cnet = SVCNN(args.name, nclasses=args.num_class, pretraining=pretraining, cnn_name=args.cnn_name)
    # Total number of single-view training images (models * views per model).
    n_models_train = args.num_models*args.num_views
    
    def stage1():
        # Train the single-view CNN; reads `cnet`, `args` and
        # `n_models_train` from the enclosing scope.
        # NOTE(review): body appears truncated at the end of this chunk.
        log_dir = os.path.join(args.log_path, args.name, args.name+'_stage_1')
        create_folder(log_dir)

        optimizer = optim.Adam(cnet.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    

        train_dataset = SingleImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0)

        val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
        val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
        print('num_train_files: '+str(len(train_dataset.filepaths)))
# ---- Example 6 (score: 0) ----
	# NOTE(review): fragment — the enclosing function/`__main__` header is
	# outside this chunk, and the code continues past its end.
	run_dir = os.path.join(ckpt_dir, run_folder)
	if not os.path.exists(run_dir):
		os.makedirs(run_dir)
	
	
	pretraining = not args.no_pretraining
	log_dir = run_dir + '/' + args.name
	create_folder(log_dir)
	# Persist the parsed arguments so the run can be reproduced later.
	# NOTE(review): file opened without a `with` block; closed manually below.
	config_f = open(os.path.join(log_dir, 'config.json'), 'w')
	json.dump(vars(args), config_f)
	config_f.close()
	
	# Total number of single-view training images (models * views per model).
	n_models_train = args.num_models * args.num_views
	
	# STAGE 1: single-view backbone.
	cnet = SVCNN(args.name, nclasses=40, pretraining=pretraining, cnn_name=args.cnn_name)

	# Only train the SVCNN when explicitly requested; presumably other
	# modes load pre-trained weights instead — verify against full file.
	if args.svcnn_training_mode == 'train':
		log_dir = run_dir + '/' + args.name + '_stage_1'
		create_folder(log_dir)
		
		optimizer = optim.Adam(cnet.parameters(), lr=args.lr, weight_decay=args.weight_decay)

		train_dataset = SingleImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
		train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0)

		val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
		val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
		print('num_train_files: '+str(len(train_dataset.filepaths)))
		print('num_val_files: '+str(len(val_dataset.filepaths)))
		trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
# ---- Example 7 (score: 0) ----
if __name__ == '__main__':
    args = parser.parse_args()

    pretraining = not args.no_pretraining
    log_dir = args.name
    create_folder(args.name)
    # Persist the parsed arguments so the run can be reproduced later.
    # NOTE(review): file opened without a `with` block; closed manually below.
    config_f = open(os.path.join(log_dir, 'config.json'), 'w')
    json.dump(vars(args), config_f)
    config_f.close()

    # STAGE 1: binary-classification variant (nclasses=2) of the SVCNN.
    log_dir = args.name + '_stage_1'
    create_folder(log_dir)
    cnet = SVCNN(args.name,
                 nclasses=2,
                 pretraining=pretraining,
                 cnn_name=args.cnn_name)

    optimizer = optim.Adam(cnet.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)

    # Total number of single-view training images (models * views per model).
    n_models_train = args.num_models * args.num_views

    train_dataset = SingleImgDataset(args.train_path,
                                     scale_aug=False,
                                     rot_aug=False,
                                     num_models=n_models_train,
                                     num_views=args.num_views)
    # NOTE(review): statement truncated at the end of this chunk — the
    # DataLoader call is incomplete here.
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batchSize * 3,
# ---- Example 8 (score: 0) ----
    # NOTE(review): fragment — the enclosing function/`__main__` header is
    # outside this chunk, and the code continues past its end.
    # Timestamp (year_month_day_hour) used to disambiguate log directories.
    localtime = time.strftime('%Y_%m_%d_%H', time.localtime(time.time()))
    pretraining = not args.no_pretraining
    log_dir = args.name
    create_folder(args.name)
    # Persist the parsed arguments so the run can be reproduced later.
    config_f = open(os.path.join(log_dir, 'config.json'), 'w')
    json.dump(vars(args), config_f)
    config_f.close()
    # Size of the feature vector fed to the loss heads.
    num_feature = 1024
    # STAGE 1
    log_dir = 'smtcloss_wonor_' + args.name + '_stage_1_' + localtime
    create_folder(log_dir)

    # ImageNet-pretrained VGG-11 used as the feature extractor.
    vgg = models.vgg11(pretrained=True)
    cnet = SVCNN(args.name,
                 vgg,
                 num_feature,
                 nclasses=40,
                 pretraining=pretraining,
                 cnn_name=args.cnn_name)
    if (torch.cuda.is_available()):
        cnet = cnet.cuda()
        print('use GPU to train ')
    else:
        print('don.t use gpu')

    # With center loss enabled: combine triplet-center loss with softmax.
    center_loss = Triplet_Center_Loss()
    softmax_loss = nn.CrossEntropyLoss()
    optimizer_model = optim.SGD(cnet.parameters(),
                                lr=args.lr,
                                weight_decay=args.weight_decay,
                                momentum=0.9)
# ---- Example 9 (score: 0) ----
        # NOTE(review): fragment — the enclosing conditional and
        # function/`__main__` headers are outside this chunk, and the code
        # continues past its end.
        nclasses = 40

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    use_dataparallel = args.use_dataparallel
    pretraining = not args.no_pretraining
    log_dir = args.name
    create_folder(args.name)
    # Persist the parsed arguments so the run can be reproduced later.
    config_f = open(os.path.join(log_dir, 'config.json'), 'w')
    json.dump(vars(args), config_f)
    config_f.close()

    # STAGE 1: SVCNN variant with optional encoder/decoder head; the
    # commented-out code below (DataParallel, optimizer, datasets) is the
    # original single-view training path, disabled in this example.
    # log_dir = args.name+'_stage_1'
    # create_folder(log_dir)
    cnet = SVCNN(args.name, nclasses=nclasses, pretraining=pretraining, cnn_name=args.cnn_name,KNU_data=args.KNU_Data,
                 use_encdec=args.use_encdec, encdec_name=args.encdec_name, encdim=args.encdim)
    #
    # if use_dataparallel:
    #     cnet = nn.DataParallel(cnet)
    #     cnet.to(device)
    #
    # optimizer = optim.Adam(cnet.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    #
    # Total number of single-view training images (models * views per model).
    n_models_train = args.num_models*args.num_views

    # train_dataset = SingleImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views,KNU_data=args.KNU_Data)
    # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0)
    #
    # val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True,KNU_data=args.KNU_Data)
    # val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
    # print('num_train_files: '+str(len(train_dataset.filepaths)))