Example #1
0
def main():
    """Entry point: build the noise-dataset loaders, set up model/optimizer state, and run sampling training."""
    global args, best_prec_result

    # dataset_selector returns many loaders; only the noise sample/test loaders are consumed below.
    (True_loader, Fake_loader, Noise_loader, Noise_Test_loader,
     Noise_Sample_loader, Noise_Triple_loader, All_loader, Test_loader,
     chIn, clsN) = dataset_selector(args.db)

    # Record dataset geometry and the LR-decay schedules on args for downstream code.
    args.chIn = chIn
    args.clsN = clsN
    args.milestones = [80, 120]
    args.Dmilestones = [30, 60]

    state_info = utils.model_optim_state_info()
    state_info.model_init(args)
    state_info.model_cuda_init()

    if not cuda:
        print("NO GPU")
    else:
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        state_info.weight_cuda_init()
        cudnn.benchmark = True

    state_info.optimizer_init(args)

    train_Sample(args, state_info, Noise_Sample_loader, Noise_Test_loader)
Example #2
0
def main():
    """Entry point: train with a memory/anchor scheme on the selected dataset."""
    global args, best_prec_result

    Train_loader, Test_loader, chIn, clsN = dataset_selector(args.db)
    AnchorSet = dataset.Cifar10_Sample(args)

    # Record dataset geometry on args for downstream code.
    args.chIn = chIn
    args.clsN = clsN
    # Fall back to a default LR-decay schedule when none was supplied.
    if not args.milestones:
        args.milestones = [250, 400]

    state_info = utils.model_optim_state_info()
    state_info.model_init(args)
    state_info.model_cuda_init()

    if not cuda:
        print("NO GPU")
    else:
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        state_info.weight_cuda_init()
        cudnn.benchmark = True

    state_info.optimizer_init(args)
    train_MEM(args, state_info, Train_loader, Test_loader, AnchorSet)
Example #3
0
def main():
    """Restore model state from a (non-last) checkpoint if present and run
    selection checks on both the train and test loaders.

    Uses the module-level ``args`` (``args.dir`` for the checkpoint directory,
    ``args.sd`` to pick the dataset) and updates the global
    ``best_prec_result`` from the checkpoint.
    """
    global args, best_prec_result

    utils.default_model_dir = args.dir

    train_loader, test_loader, _, _ = dataset_selector(args.sd)

    state_info = utils.model_optim_state_info()
    state_info.model_init(args=args, num_class=10)
    state_info.model_cuda_init()
    # state_info.weight_init()
    state_info.optimizer_init(args)

    if cuda:
        print("USE", torch.cuda.device_count(), "GPUs!")
        cudnn.benchmark = True

    # Restore model/optimizer state from the best (is_last=False) checkpoint
    # when one exists. (Removed dead locals: `start_epoch` was assigned from
    # the checkpoint but never read, and `start_time` was never used.)
    checkpoint = utils.load_checkpoint(utils.default_model_dir, is_last=False)
    if checkpoint:
        best_prec_result = checkpoint['Best_Prec']
        state_info.load_state_dict(checkpoint)

    check_selection(state_info, train_loader)
    check_selection(state_info, test_loader)
Example #4
0
def main():
    """Entry point: run the full train/test loop with a fixed sample batch and report elapsed time."""
    global args, best_prec_result

    utils.default_model_dir = args.dir
    start_time = time.time()
    start_epoch = 0

    train_loader, test_loader, ch, wh = dataset_selector(args.dataset)
    # One fixed batch reused by test() every epoch.
    sample = extract_sample(train_loader)

    state_info = utils.model_optim_state_info()
    state_info.model_init(Img=[ch, wh], H=args.h,
                          latent_size=args.latent_size, num_class=10)
    state_info.model_cuda_init()
    state_info.weight_init()
    state_info.optimizer_init(args)

    if cuda:
        print("USE", torch.cuda.device_count(), "GPUs!")
        cudnn.benchmark = True

    state_info.learning_scheduler_init(args)

    for epoch in range(start_epoch, args.epoch):
        train(state_info, train_loader, epoch)
        test(state_info, test_loader, sample, epoch)
        state_info.learning_step()

    # Report wall-clock training duration.
    elapsed = time.gmtime(time.time() - start_time)
    utils.print_log('{} hours {} mins {} secs for training'.format(
        elapsed.tm_hour, elapsed.tm_min, elapsed.tm_sec))
Example #5
0
def main():
    """Domain-adaptation training: source/target loaders, checkpoint resume,
    per-epoch best/latest checkpointing, and elapsed-time reporting.

    Reads the module-level ``args`` and updates the global ``best_prec_result``.
    """
    global args, best_prec_result

    utils.default_model_dir = args.dir
    start_time = time.time()

    Source_train_loader, Source_test_loader = dataset_selector(args.sd)
    Target_train_loader, Target_test_loader = dataset_selector(args.td)
    Target_shuffle_loader, _ = dataset_selector(args.td)

    state_info = utils.model_optim_state_info()
    state_info.model_init()
    state_info.model_cuda_init()

    if cuda:
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        state_info.weight_cuda_init()
        cudnn.benchmark = True
    else:
        print("NO GPU")

    state_info.optimizer_init(lr=args.lr, b1=args.b1, b2=args.b2, weight_decay=args.weight_decay)

    start_epoch = 0

    checkpoint = utils.load_checkpoint(utils.default_model_dir)
    if not checkpoint:
        state_info.learning_scheduler_init(args)
    else:
        start_epoch = checkpoint['epoch'] + 1
        best_prec_result = checkpoint['Best_Prec']
        state_info.load_state_dict(checkpoint)
        state_info.learning_scheduler_init(args, load_epoch=start_epoch)

    # Fixed visualization samples drawn once from each domain.
    realS_sample_iter = iter(Source_train_loader)
    realT_sample_iter = iter(Target_train_loader)

    # BUG FIX: `iterator.next()` is Python 2 only — use the builtin next().
    realS_sample = to_var(next(realS_sample_iter)[0], FloatTensor)
    realT_sample = to_var(next(realT_sample_iter)[0], FloatTensor)

    # BUG FIX: resume from start_epoch; the original always restarted at 0
    # even after restoring a checkpoint.
    for epoch in range(start_epoch, args.epoch):

        train(state_info, Source_train_loader, Target_train_loader, Target_shuffle_loader, epoch)
        prec_result = test(state_info, Source_test_loader, Target_test_loader, realS_sample, realT_sample, epoch)

        # Keep a separate checkpoint for the best accuracy seen so far.
        if prec_result > best_prec_result:
            best_prec_result = prec_result
            filename = 'checkpoint_best.pth.tar'
            utils.save_state_checkpoint(state_info, best_prec_result, filename, utils.default_model_dir, epoch)

        filename = 'latest.pth.tar'
        utils.save_state_checkpoint(state_info, best_prec_result, filename, utils.default_model_dir, epoch)
        state_info.learning_step()

    now = time.gmtime(time.time() - start_time)
    utils.print_log('{} hours {} mins {} secs for training'.format(now.tm_hour, now.tm_min, now.tm_sec))
Example #6
0
def main():
    """Training loop with a manual step LR schedule (decay at epochs 80 and
    120) and checkpoint-based resume.

    Reads the module-level ``args`` and updates the global ``best_prec_result``.
    """
    global args, best_prec_result

    start_epoch = 0
    utils.default_model_dir = args.dir
    start_time = time.time()

    train_loader, test_loader, _, _ = dataset_selector(args.sd)

    state_info = utils.model_optim_state_info()
    state_info.model_init(args=args, num_class=10)
    state_info.model_cuda_init()
    # state_info.weight_init()
    state_info.optimizer_init(args)

    if cuda:
        print("USE", torch.cuda.device_count(), "GPUs!")
        cudnn.benchmark = True

    checkpoint = utils.load_checkpoint(utils.default_model_dir, is_last=True)
    if checkpoint:
        start_epoch = checkpoint['epoch'] + 1
        best_prec_result = checkpoint['Best_Prec']
        state_info.load_state_dict(checkpoint)

    # BUG FIX: resume from start_epoch; the original looped range(0, args.epoch)
    # and restarted from epoch 0 even after loading a checkpoint.
    for epoch in range(start_epoch, args.epoch):
        # Step LR schedule: full LR until 80, x0.1 until 120, x0.01 after.
        if epoch < 80:
            lr = args.lr
        elif epoch < 120:
            lr = args.lr * 0.1
        else:
            lr = args.lr * 0.01
        for param_group in state_info.optimizer.param_groups:
            param_group['lr'] = lr

        train(state_info, train_loader, epoch)
        prec_result = test(state_info, test_loader, epoch)

        # Keep a separate checkpoint for the best accuracy seen so far.
        if prec_result > best_prec_result:
            best_prec_result = prec_result
            filename = 'checkpoint_best.pth.tar'
            utils.save_state_checkpoint(state_info, best_prec_result, filename,
                                        utils.default_model_dir, epoch)
            utils.print_log('Best Prec : {:.4f}'.format(
                best_prec_result.item()))

        filename = 'latest.pth.tar'
        utils.save_state_checkpoint(state_info, best_prec_result, filename,
                                    utils.default_model_dir, epoch)

    now = time.gmtime(time.time() - start_time)
    utils.print_log('Best Prec : {:.4f}'.format(best_prec_result.item()))
    utils.print_log('{} hours {} mins {} secs for training'.format(
        now.tm_hour, now.tm_min, now.tm_sec))

    print('done')
Example #7
0
def main():
    """Entry point: semi-supervised training over labeled/unlabeled splits."""
    global args, best_prec_result

    train_labeled_dataset, train_unlabeled_dataset, test_dataset = dataset_selector()

    # Fixed 10-class setup; LR decays at epochs 80 and 120.
    args.clsN = 10
    args.milestones = [80, 120]

    state_info = utils.model_optim_state_info()
    state_info.model_init(args)
    state_info.model_cuda_init()

    if not cuda:
        print("NO GPU")
    else:
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        state_info.weight_cuda_init()
        cudnn.benchmark = True

    state_info.optimizer_init(args)
    train_MEM(args, state_info, train_labeled_dataset, train_unlabeled_dataset, test_dataset)
Example #8
0
def main():
    """Entry point: epoch-based training; optionally enables switched learning on the model."""
    global args, best_prec_result

    Train_loader, Test_loader, chIn, clsN = dataset_selector(args.db)

    # Record dataset geometry on args for downstream code.
    args.chIn = chIn
    args.clsN = clsN
    # args.milestones = [150,225]

    state_info = utils.model_optim_state_info()
    state_info.model_init(args)
    if args.use_switch:
        utils.init_learning(state_info.model.module)

    if not cuda:
        print("NO GPU")
    else:
        print("USE", torch.cuda.device_count(), "GPUs!")
        cudnn.benchmark = True

    state_info.optimizer_init(args)
    train_Epoch(args, state_info, Train_loader, Test_loader)
Example #9
0
def main():
    """Adversarial domain-adaptation training driven by a fixed iteration
    budget (``args.train_iters`` converted into whole epochs).

    Reads the module-level ``args`` and updates the global ``best_prec_result``.
    """
    global args, best_prec_result

    utils.default_model_dir = args.dir
    start_time = time.time()

    Source_train_loader, Source_test_loader = dataset_selector(args.sd)
    Target_train_loader, Target_test_loader = dataset_selector(args.td)

    state_info = utils.model_optim_state_info()
    state_info.model_init()
    state_info.model_cuda_init()

    if cuda:
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        state_info.weight_cuda_init()
        cudnn.benchmark = True
    else:
        print("NO GPU")

    state_info.optimizer_init(lr=args.lr,
                              b1=args.b1,
                              b2=args.b2,
                              weight_decay=args.weight_decay)

    adversarial_loss = torch.nn.BCELoss()
    criterion = nn.CrossEntropyLoss().cuda()

    start_epoch = 0

    # Resume from the latest checkpoint when one exists.
    # (Removed dead code: a bare `utils.default_model_dir` expression
    # statement, an unused `filename` assignment, and the empty
    # `if not checkpoint: pass` branch.)
    checkpoint = utils.load_checkpoint(utils.default_model_dir)
    if checkpoint:
        start_epoch = checkpoint['epoch'] + 1
        best_prec_result = checkpoint['Best_Prec']
        state_info.load_state_dict(checkpoint)

    # Convert the iteration budget into whole epochs over the shorter loader.
    numEpochs = int(
        math.ceil(
            float(args.train_iters) /
            float(min(len(Source_train_loader), len(Target_train_loader)))))

    # BUG FIX: resume from start_epoch; the original looped range(numEpochs)
    # and restarted from epoch 0 even after loading a checkpoint.
    for epoch in range(start_epoch, numEpochs):
        train(state_info, Source_train_loader, Target_train_loader, criterion,
              adversarial_loss, epoch)
        prec_result = test(state_info, Source_test_loader, Target_test_loader,
                           criterion, epoch)

        # Keep a separate checkpoint for the best accuracy seen so far.
        if prec_result > best_prec_result:
            best_prec_result = prec_result
            filename = 'checkpoint_best.pth.tar'
            utils.save_state_checkpoint(state_info, best_prec_result, filename,
                                        utils.default_model_dir, epoch)

        # Refresh the rolling "latest" checkpoint every 5 epochs.
        if epoch % 5 == 0:
            filename = 'latest.pth.tar'
            utils.save_state_checkpoint(state_info, best_prec_result, filename,
                                        utils.default_model_dir, epoch)

    now = time.gmtime(time.time() - start_time)
    utils.print_log('{} hours {} mins {} secs for training'.format(
        now.tm_hour, now.tm_min, now.tm_sec))