Example #1
def get_model(model_name, num_classes=10, keep_prob=1.0, **kwargs):
    """Build a model by name; extra kwargs are forwarded to the KNN/KCNN variants."""
    name = model_name.lower()
    if name == 'lenetknn':
        return leNet_KNN.LeNet5KNN(num_classes=num_classes,
                                   keep_prob=keep_prob,
                                   **kwargs)
    elif name == 'lenet':
        return leNet.LeNet5(num_classes=num_classes, keep_prob=keep_prob)
    elif name == 'resnet18':
        return resnet.ResNet18(num_classes=num_classes)
    elif name == 'resnet34':
        return resnet.ResNet34(num_classes=num_classes)
    elif name == 'resnet101':
        return resnet.ResNet101(num_classes=num_classes)
    elif name == 'resnet18knn':
        return resnet_KNN.ResNet18(num_classes=num_classes,
                                   kernel_fn=kwargs['kernel_fn'])
    elif name == 'resnet101knn':
        return resnet_KNN.ResNet101(num_classes=num_classes,
                                    kernel_fn=kwargs['kernel_fn'])
    elif name == 'lenetkcnn':
        return leNet_KNN.LeNet5KCNN(num_classes=num_classes,
                                    keep_prob=keep_prob,
                                    **kwargs)
    elif name == 'resnet101kcnn':
        return resnet_KNN.ResNet101KCNN(num_classes=num_classes,
                                        keep_prob=keep_prob,
                                        **kwargs)
    else:
        raise ValueError('Unknown model name {}'.format(model_name))
Example #2
def main():
    # argument parsing
    args = get_arguments()
    args.device = torch.device('cuda', args.gpu_id)

    # dataset setting: the same biased dataloaders are built whether or not
    # training resumes from a checkpoint
    (s_train_dataloader, s_test_dataloader,
     t_train_dataloader, t_test_dataloader) = make_biased_dataloader(args)

    concat_dataset = ConcatDataset(s_train_dataloader.dataset,
                                   t_train_dataloader.dataset)
    concat_dataset_test = ConcatDataset(s_test_dataloader.dataset,
                                        t_test_dataloader.dataset)
    train_loader = DataLoader(concat_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=4)
    test_loader = DataLoader(concat_dataset_test,
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=4)

    net = resnet.ResNet34(num_classes=10).to(args.device)
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=args.wdecay)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=20)

    if args.resume == 1:
        checkpoint = torch.load('./checkpoint/Source_Only/' + 'RN_' +
                                args.train_mode + '_' + '85')
        net.load_state_dict(checkpoint)

    path = './checkpoint/Source_Only/' + 'RN_' + args.train_mode + '_'
    best_acc = 0
    for epoch in range(args.epochs):
        train(args, net, train_loader, optimizer, scheduler, epoch)
        acc = test(args, net, test_loader, optimizer, scheduler, epoch)
        scheduler.step()
        if best_acc < acc:
            best_acc = acc
            if not os.path.isdir('checkpoint/Source_Only/'):
                os.makedirs('checkpoint/Source_Only/')
            torch.save(net.state_dict(), path + str(epoch))
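Note that ConcatDataset is called here with two positional datasets, while torch.utils.data.ConcatDataset expects a single list. A minimal sketch of a compatible class, assuming the original simply concatenates its arguments:

from torch.utils.data import Dataset

class ConcatDataset(Dataset):
    """Concatenate datasets passed as separate positional arguments."""
    def __init__(self, *datasets):
        self.datasets = datasets
        self.lengths = [len(d) for d in datasets]

    def __len__(self):
        return sum(self.lengths)

    def __getitem__(self, idx):
        # walk the datasets in order until idx falls inside one of them
        for dataset, length in zip(self.datasets, self.lengths):
            if idx < length:
                return dataset[idx]
            idx -= length
        raise IndexError('index {} out of range'.format(idx))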
Example #3
def main():
    if args.resume:
        if not os.path.isfile('./checkpoint/{}.pkl'.format(args.model)):
            raise ValueError('no models saved....!!!!')
        print('resume from checkpoint....')
        net = torch.load('./checkpoint/{}.pkl'.format(args.model))
    else:
        if args.model in ('vgg16', 'vgg19'):
            net = vgg.VGG(args.model)
        elif args.model == 'resnet18':
            net = resnet.ResNet18()
        elif args.model == 'resnet34':
            net = resnet.ResNet34()
        elif args.model == 'resnet50':
            net = resnet.ResNet50()
        elif args.model == 'resnet101':
            net = resnet.ResNet101()
        elif args.model == 'resnet152':
            net = resnet.ResNet152()
        elif args.model == 'densenet121':
            net = densenet.DenseNet121()
        elif args.model == 'densenet161':
            net = densenet.DenseNet161()
        elif args.model == 'densenet169':
            net = densenet.DenseNet169()
        elif args.model == 'densenet201':
            net = densenet.DenseNet201()
        else:
            raise ValueError('model not implemented...!!')

    net.cuda(args.gpu)
    net = nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(net.parameters(), lr=args.lr,
                                momentum=0.9, weight_decay=1e-4)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)

    for e in range(args.epoch):
        train(e, net, criterion, optimizer, lr_scheduler)
        test(e, net)
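The train and test helpers are defined elsewhere in this script. A minimal sketch of a train helper matching the call above; the module-level args and train_loader it references are assumptions:

def train(epoch, net, criterion, optimizer, scheduler):
    net.train()
    running_loss = 0.0
    for inputs, targets in train_loader:
        inputs, targets = inputs.cuda(args.gpu), targets.cuda(args.gpu)
        optimizer.zero_grad()
        loss = criterion(net(inputs), targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    scheduler.step()  # StepLR decays the lr by gamma every step_size epochs
    print('epoch {}: train loss {:.4f}'.format(epoch, running_loss / len(train_loader)))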
Example #4
def run(train_batch_size, epochs, lr, weight_decay, config, exp_id, log_dir, trained_model_file, model_name, disable_gpu=False):
    # logging helpers: print to stdout and/or append to a log file
    def logging(s, log_path, print_=True, log_=True):
        if print_:
            print(s)
        if log_:
            with open(log_path, 'a+') as f_log:
                f_log.write(s + '\n')
    def get_logger(log_path, **kwargs):
        return functools.partial(logging, log_path=log_path, **kwargs)

    logging = get_logger('./logger/log.txt')

    # load the dataset
    if config['test_ratio']:
        train_loader, val_loader, test_loader = get_data_loaders(config, train_batch_size, exp_id)
    else:
        train_loader, val_loader = get_data_loaders(config, train_batch_size, exp_id)

    device = torch.device("cuda" if not disable_gpu and torch.cuda.is_available() else "cpu")

    if model_name == 'CNNIQA':
        model = cnn.CNNIQAnet()
    elif model_name == 'lenet5':
        model = lenet5.LeNet5()
    elif model_name == 'resnet18':
        model = resnet.ResNet18()
    elif model_name == 'resnet34':
        model = resnet.ResNet34()
    elif model_name == 'vgg19':
        model = vgg.VGG('VGG19')
    else:
        raise ValueError('Unknown model name {}'.format(model_name))

    writer = SummaryWriter(log_dir=log_dir)
    model = model.to(device)  # move the model to the selected device
    # summary(model, input_size=(32, 32))  # omit the batch dimension N

    # print("model:", model)
    # logging("model: {}".format(model))
    # if multi_gpu and torch.cuda.device_count() > 1:
    #     model = nn.DataParallel(model)

    optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    global best_criterion
    best_criterion = -1  # SROCC >= -1
    # trainer (from the ignite library)
    trainer = create_supervised_trainer(model, optimizer, loss_fn, device=device)
    # evaluator (from the ignite library)
    evaluator = create_supervised_evaluator(model,
                                            metrics={'IQA_performance': IQAPerformance()},
                                            device=device)

    # The decorated handlers below are registered on the trainer, so
    # validation and testing run alongside training.
    # Split is training/validation/testing = 0.6/0.2/0.2; validation and
    # testing run after every epoch.
    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        SROCC, KROCC, PLCC, RMSE, MAE, OR = metrics['IQA_performance']
        # print("Validation Results - Epoch: {} SROCC: {:.4f} KROCC: {:.4f} PLCC: {:.4f} RMSE: {:.4f} MAE: {:.4f} OR: {:.2f}%"
        #       .format(engine.state.epoch, SROCC, KROCC, PLCC, RMSE, MAE, 100 * OR))
        logging("Validation Results - Epoch: {} SROCC: {:.4f} KROCC: {:.4f} PLCC: {:.4f} RMSE: {:.4f} MAE: {:.4f} OR: {:.2f}%"
              .format(engine.state.epoch, SROCC, KROCC, PLCC, RMSE, MAE, 100 * OR))
        writer.add_scalar("validation/SROCC", SROCC, engine.state.epoch)
        writer.add_scalar("validation/KROCC", KROCC, engine.state.epoch)
        writer.add_scalar("validation/PLCC", PLCC, engine.state.epoch)
        writer.add_scalar("validation/RMSE", RMSE, engine.state.epoch)
        writer.add_scalar("validation/MAE", MAE, engine.state.epoch)
        writer.add_scalar("validation/OR", OR, engine.state.epoch)
        global best_criterion
        global best_epoch
        if SROCC > best_criterion:
            best_criterion = SROCC
            best_epoch = engine.state.epoch
            # save the best model, judged by validation SROCC
            # _use_new_zipfile_serialization=False keeps the checkpoint
            # loadable by PyTorch versions before 1.6
            torch.save(model.state_dict(), trained_model_file, _use_new_zipfile_serialization=False)


    @trainer.on(Events.EPOCH_COMPLETED)
    def log_testing_results(engine):
        if config["test_ratio"] > 0 and config['test_during_training']:
            evaluator.run(test_loader)
            metrics = evaluator.state.metrics
            SROCC, KROCC, PLCC, RMSE, MAE, OR = metrics['IQA_performance']
            # print("Testing Results    - Epoch: {} SROCC: {:.4f} KROCC: {:.4f} PLCC: {:.4f} RMSE: {:.4f} MAE: {:.4f} OR: {:.2f}%"
            #       .format(engine.state.epoch, SROCC, KROCC, PLCC, RMSE, MAE, 100 * OR))
            logging("Testing Results     - Epoch: {} SROCC: {:.4f} KROCC: {:.4f} PLCC: {:.4f} RMSE: {:.4f} MAE: {:.4f} OR: {:.2f}%"
                  .format(engine.state.epoch, SROCC, KROCC, PLCC, RMSE, MAE, 100 * OR))
            writer.add_scalar("testing/SROCC", SROCC, engine.state.epoch)
            writer.add_scalar("testing/KROCC", KROCC, engine.state.epoch)
            writer.add_scalar("testing/PLCC", PLCC, engine.state.epoch)
            writer.add_scalar("testing/RMSE", RMSE, engine.state.epoch)
            writer.add_scalar("testing/MAE", MAE, engine.state.epoch)
            writer.add_scalar("testing/OR", OR, engine.state.epoch)

    @trainer.on(Events.COMPLETED)
    def final_testing_results(engine):
        if config["test_ratio"]:
            model.load_state_dict(torch.load(trained_model_file))
            evaluator.run(test_loader)
            metrics = evaluator.state.metrics
            SROCC, KROCC, PLCC, RMSE, MAE, OR = metrics['IQA_performance']
            global best_epoch
            # the "best" test results come from the epoch with the highest
            # validation SROCC
            logging("Final Test Results - Epoch: {} SROCC: {:.4f} KROCC: {:.4f} PLCC: {:.4f} RMSE: {:.4f} MAE: {:.4f} OR: {:.2f}%"
                .format(best_epoch, SROCC, KROCC, PLCC, RMSE, MAE, 100 * OR))
            np.save(save_result_file, (SROCC, KROCC, PLCC, RMSE, MAE, OR))

    # kick off training
    trainer.run(train_loader, max_epochs=epochs)

    writer.close()
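IQAPerformance is a custom ignite metric that is not shown here. For reference, the correlation numbers it reports can be computed from predicted and ground-truth quality scores with scipy; this sketch covers the reported values except the dataset-specific outlier ratio OR:

import numpy as np
from scipy import stats

def iqa_metrics(y_pred, y_true):
    y_pred, y_true = np.asarray(y_pred), np.asarray(y_true)
    srocc = stats.spearmanr(y_pred, y_true)[0]   # Spearman rank correlation
    krocc = stats.kendalltau(y_pred, y_true)[0]  # Kendall rank correlation
    plcc = stats.pearsonr(y_pred, y_true)[0]     # Pearson linear correlation
    rmse = np.sqrt(np.mean((y_pred - y_true) ** 2))
    mae = np.mean(np.abs(y_pred - y_true))
    return srocc, krocc, plcc, rmse, mae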
Example #5
    # NOTE: the opening of this snippet is truncated in the original; the
    # parser setup below is reconstructed from the argument usage that follows
    parser = argparse.ArgumentParser()
    parser.add_argument('--im_path', type=str, help="image path")
    parser.add_argument('--model_file', type=str,
                        default='models/CNNIQA-LIVE',
                        help="model file (default: models/CNNIQA-LIVE)")

    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if args.model_file == 'models/CNNIQA-LIVE':
        model = cnn.CNNIQAnet(ker_size=7,
                              n_kers=50,
                              n1_nodes=800,
                              n2_nodes=800).to(device)
    elif args.model_file == 'models/resnet18-LIVE':
        model = resnet.ResNet18().to(device)
    elif args.model_file == 'models/resnet34-LIVE':
        model = resnet.ResNet34().to(device)
    elif args.model_file == 'models/lenet5-LIVE':
        model = lenet5.LeNet5().to(device)
    elif args.model_file == 'models/vgg19-LIVE':
        model = vgg.VGG('VGG19').to(device)
    else:
        raise ValueError('Unknown model file {}'.format(args.model_file))

    model.load_state_dict(torch.load(args.model_file))

    img = Image.open(args.im_path).convert('L')
    patches = NonOverlappingCropPatches(img, 32, 32)

    model.eval()
    with torch.no_grad():
        patch_scores = model(torch.stack(patches).to(device))
        print(patch_scores.mean())
        print(patch_scores.mean().item())
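NonOverlappingCropPatches is not shown in this snippet. A minimal sketch that tiles a PIL image into non-overlapping 32x32 tensors, assuming that is all the original helper does:

from torchvision.transforms.functional import to_tensor

def NonOverlappingCropPatches(img, patch_h=32, patch_w=32):
    # tile the image left-to-right, top-to-bottom; edge remainders are dropped
    w, h = img.size
    patches = []
    for top in range(0, h - patch_h + 1, patch_h):
        for left in range(0, w - patch_w + 1, patch_w):
            box = (left, top, left + patch_w, top + patch_h)
            patches.append(to_tensor(img.crop(box)))
    return patches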
Example #6
def main(args):
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_idx)
    os.environ["CUDA_DEVICE"] = str(args.gpu_idx)

    np.random.seed(0)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu_idx)

    out_file = os.path.join(args.output_dir,
                            '{}_{}.pth'.format(args.net_type, args.dataset))

    # set the transformations for training
    tfs_for_augmentation = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ]
    if args.dataset == 'cifar10':
        train_transform = transforms.Compose(tfs_for_augmentation + [
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435,
                                                            0.2616))
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2470, 0.2435, 0.2616)),
        ])
    elif args.dataset == 'cifar100':
        train_transform = transforms.Compose(tfs_for_augmentation + [
            transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564,
                                                            0.2762)),
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4865, 0.4409),
                                 (0.2673, 0.2564, 0.2762)),
        ])
    elif args.dataset == 'svhn':
        train_transform = transforms.Compose([transforms.ToTensor()])
        test_transform = transforms.Compose([transforms.ToTensor()])

    # load model
    if args.net_type == 'densenet':
        if args.dataset == 'svhn':
            model = densenet.DenseNet3(100,
                                       args.num_classes,
                                       growth_rate=12,
                                       dropRate=0.2)
        else:
            model = densenet.DenseNet3(100, args.num_classes, growth_rate=12)
    elif args.net_type == 'resnet':
        model = resnet.ResNet34(num_c=args.num_classes)
    elif args.net_type == 'vanilla':
        model = vanilla.VanillaCNN(args.num_classes)
    else:
        raise ValueError('Unknown net type {}'.format(args.net_type))
    model.cuda()
    print('load model: ' + args.net_type)

    # load dataset
    print('load target data: ' + args.dataset)
    if args.dataset == 'svhn':
        train_loader, valid_loader = data_utils.get_dataloader(
            args.dataset,
            args.data_root,
            'train',
            train_transform,
            args.batch_size,
            valid_transform=test_transform)
    else:
        train_loader = data_utils.get_dataloader(args.dataset, args.data_root,
                                                 'train', train_transform,
                                                 args.batch_size)
    test_loader = data_utils.get_dataloader(args.dataset, args.data_root,
                                            'test', test_transform,
                                            args.batch_size)

    # define objective and optimizer
    criterion = nn.CrossEntropyLoss()
    if args.net_type == 'densenet' or args.net_type == 'vanilla':
        weight_decay = 1e-4
        milestones = [150, 225]
        gamma = 0.1
    elif args.net_type == 'resnet':
        weight_decay = 5e-4
        milestones = [60, 120, 160]
        gamma = 0.2
    if args.dataset == 'svhn' or args.net_type == 'vanilla':
        milestones = [20, 30]

    optimizer = optim.SGD(model.parameters(),
                          lr=0.1,
                          momentum=0.9,
                          weight_decay=weight_decay)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones,
                                               gamma=gamma)

    # train
    best_loss = np.inf
    iter_cnt = 0
    for epoch in range(args.num_epochs):
        model.train()
        total, total_loss, total_step = 0, 0, 0
        for _, (data, labels) in enumerate(train_loader):
            data = data.cuda()
            labels = labels.cuda()
            total += data.size(0)

            outputs = model(data)
            loss = criterion(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item() * data.size(0)
            iter_cnt += 1
            total_step += 1

            if args.dataset == 'svhn' and iter_cnt >= 200:
                valid_loss, _ = evaluation(model, valid_loader, criterion)
                test_loss, acc = evaluation(model, test_loader, criterion)
                print(
                    'Epoch [{:03d}/{:03d}], step [{}/{}] train loss : {:.4f}, valid loss : {:.4f}, test loss : {:.4f}, test acc : {:.2f} %'
                    .format(epoch + 1, args.num_epochs, total_step,
                            len(train_loader), total_loss / total, valid_loss,
                            test_loss, 100 * acc))
                if valid_loss < best_loss:
                    best_loss = valid_loss
                    torch.save(model, out_file)
                iter_cnt = 0
                model.train()

        if args.dataset != 'svhn':
            test_loss, acc = evaluation(model, test_loader, criterion)
            print(
                '[{:03d}/{:03d}] train loss : {:.4f}, test loss : {:.4f}, test acc : {:.2f} %'
                .format(epoch + 1, args.num_epochs, total_loss / total,
                        test_loss, 100 * acc))
            torch.save(model, out_file)

        scheduler.step()
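The evaluation helper used above is not shown. A minimal sketch consistent with its call sites, returning mean loss and accuracy; this is an assumption, not the original implementation:

def evaluation(model, loader, criterion):
    model.eval()
    total, total_loss, correct = 0, 0.0, 0
    with torch.no_grad():
        for data, labels in loader:
            data, labels = data.cuda(), labels.cuda()
            outputs = model(data)
            # accumulate per-sample loss and top-1 accuracy
            total_loss += criterion(outputs, labels).item() * data.size(0)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += data.size(0)
    return total_loss / total, correct / total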