def main():
    model = model_dispatcher.MODEL_DISPATCHER[BASE_MODEL](True)
    #model.to(DEVICE)

    train_dataset = BengaliDatasetTrain(TRAINING_FOLDS, IMG_HEIGHT, IMG_WIDTH,
                                        MODEL_MEAN, MODEL_STD)

    train_loader = dataloader.DataLoader(dataset=train_dataset,
                                         batch_size=TRAIN_BATCH_SIZE,
                                         shuffle=True,
                                         num_workers=4)

    valid_dataset = BengaliDatasetTrain(VALIDATION_FOLDS, IMG_HEIGHT,
                                        IMG_WIDTH, MODEL_MEAN, MODEL_STD)

    valid_loader = dataloader.DataLoader(dataset=valid_dataset,
                                         batch_size=TEST_BATCH_SIZE,
                                         shuffle=False,
                                         num_workers=4)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode='min',
                                                           patience=5,
                                                           factor=0.3,
                                                           verbose=True)

    for epoch in range(EPOCHS):
        train(train_dataset, train_loader, model, optimizer)
        val_score = evaluate(valid_dataset, valid_loader, model, optimizer)
        scheduler.step(val_score)
        torch.save(model.state_dict(),
                   f'{BASE_MODEL}_{VALIDATION_FOLDS[0]}.bin')
Example #2
    def __init__(self):
        train_transform = transforms.Compose([
            transforms.Resize((384, 128), interpolation=3),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            RandomErasing(probability=0.5, mean=[0.0, 0.0, 0.0])
        ])

        test_transform = transforms.Compose([
            transforms.Resize((384, 128), interpolation=3),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        # self.trainset = Market1501(train_transform, 'train', opt.data_path)

        self.trainset = AlMarket1501(train_transform, 'train', opt.data_path)

        self.testset = Market1501(test_transform, 'test', opt.data_path)
        self.queryset = Market1501(test_transform, 'query', opt.data_path)

        self.train_loader = dataloader.DataLoader(self.trainset,
                                                  sampler=RandomSampler(self.trainset, batch_id=opt.batchid,
                                                                        batch_image=opt.batchimage),
                                                  batch_size=opt.batchid * opt.batchimage, num_workers=8,
                                                  pin_memory=True)
        self.test_loader = dataloader.DataLoader(self.testset, batch_size=opt.batchtest, num_workers=8, pin_memory=True)
        self.query_loader = dataloader.DataLoader(self.queryset, batch_size=opt.batchtest, num_workers=8,
                                                  pin_memory=True)

        if opt.mode == 'vis':
            self.query_image = test_transform(default_loader(opt.query_image))
Example #3
    def __init__(self, args):
        self.loader_train = None

        if not args.test_only and not args.demo:
            data_train = SRdatasets(args, name=args.data_name, train=True)
            # train_sampler = torch.utils.data.distributed.DistributedSampler(data_train)
            self.loader_train = DataLoader.DataLoader(
                data_train,
                batch_size=args.batch_size,
                num_workers=4,
                shuffle=True)

        if args.demo:
            data_demo = Demo(args, name=args.demo_name, train=False)
            self.loader_demo = DataLoader.DataLoader(data_demo,
                                                     batch_size=1,
                                                     shuffle=False)

        self.loader_test = []
        # Only Test one dataset
        if not args.demo:
            for test_data in args.test_name:
                data_test = SRdatasets(args, name=test_data, train=False)
                self.loader_test.append(
                    DataLoader.DataLoader(data_test,
                                          batch_size=1,
                                          num_workers=4,
                                          shuffle=False))
Example #4
def makeDataLoader(config):
    # setup data_set, data_process instances
    test_set = config.init_obj('test_set', module_data_process)
    test_dataloader = module_dataloader.DataLoader(test_set, batch_size=test_set.batch_size,
                                                   num_workers=test_set.num_workers, collate_fn=test_set.collate_fn)

    if config['k_fold'] <= 0:
        train_set = config.init_obj('train_set', module_data_process)
        valid_set = config.init_obj('valid_set', module_data_process)

        train_dataloader = module_dataloader.DataLoader(train_set, batch_size=train_set.batch_size,
                                                        num_workers=train_set.num_workers, collate_fn=train_set.collate_fn)
        valid_dataloader = module_dataloader.DataLoader(valid_set, batch_size=valid_set.batch_size,
                                                        num_workers=valid_set.num_workers, collate_fn=valid_set.collate_fn)

        yield 0, train_dataloader, valid_dataloader, test_dataloader
    else:
        logger = config.get_logger('train')
        logger.info('making {}-fold data'.format(config['k_fold']))
        all_set = config.init_obj('all_set', module_data_process)
        for i, train_index, valid_index in all_set.make_k_fold_data(config['k_fold']):
            train_set = torch.utils.data.dataset.Subset(all_set, train_index)
            valid_set = torch.utils.data.dataset.Subset(all_set, valid_index)
            train_dataloader = module_dataloader.DataLoader(train_set, batch_size=all_set.batch_size,
                                                            num_workers=all_set.num_workers, drop_last=True,
                                                            collate_fn=all_set.collate_fn)
            valid_dataloader = module_dataloader.DataLoader(valid_set, batch_size=all_set.batch_size,
                                                            num_workers=all_set.num_workers,
                                                            collate_fn=all_set.collate_fn)

            yield i, train_dataloader, valid_dataloader, test_dataloader
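A minimal usage sketch for the generator above; `config` is assumed to be the same project config object used in this example (supporting `init_obj`, `get_logger`, and the `k_fold` key):

# Hedged usage sketch: consume the folds yielded by makeDataLoader.
for fold, train_dl, valid_dl, test_dl in makeDataLoader(config):
    print('fold {}: {} train batches, {} valid batches'.format(
        fold, len(train_dl), len(valid_dl)))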
Example #5
    def __init__(self, model, training_set, testing_set, epochs, optimizer,
                 loss):
        # Training variables
        self.model = model
        self.epochs = epochs
        self.optimizer = optimizer
        self.loss = loss

        # Training and testing data as Dataset-objects
        self.training_set = training_set
        self.testing_set = testing_set

        # Using DataLoader to load the datasets
        self.training_data = dataloader.DataLoader(training_set)
        self.testing_data = dataloader.DataLoader(testing_set)

        # Containers for visualizing
        self.train_loss = []
        self.val_loss = []
        self.val_accuracy = []

        # Best model during training
        self.best_model = copy.deepcopy(self.model)
        self.best_loss = float("inf")
        self.best_accuracy = 0.0  # 100.0 = 100%, 0.0 = 0%
        self.best_results = []  # list containing all results from best prediction
Example #6
    def __init__(self):
        # paper is (384, 128)
        train_transform = transforms.Compose([
            transforms.Resize((256, 256), interpolation=3),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
            RandomErasing(probability=0.5, mean=[0.0, 0.0, 0.0])
        ])
        test_transform = transforms.Compose([
            transforms.Resize((256, 256), interpolation=3),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        self.trainset = Car(transforms=train_transform, root=opt.data_path)
        self.testset = Car(transforms=test_transform, root=opt.data_path)
        self.train_loader = dataloader.DataLoader(
            self.trainset,
            sampler=RandomSampler(self.trainset,
                                  batch_id=opt.batchid,
                                  batch_image=opt.batchimage),
            batch_size=opt.batchid * opt.batchimage,
            num_workers=8,
            pin_memory=True)
        self.test_loader = dataloader.DataLoader(self.testset,
                                                 batch_size=opt.batchtest,
                                                 num_workers=1,
                                                 pin_memory=True)
Example #7
    def __init__(self, args, data_type):
        self.args = args
        # transform_list = [
        #     transforms.RandomChoice(
        #         [transforms.RandomHorizontalFlip(),
        #          transforms.RandomGrayscale(),
        #          transforms.RandomRotation(20),
        #          ]
        #     ),
        #     transforms.ToTensor(),
        #     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
        #                          0.229, 0.224, 0.225])
        # ]
        # transform = transforms.Compose(transform_list)
        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        self.train_dataset = Dataset(args, data_type, transform)
        self.train_loader = dataloader.DataLoader(
            self.train_dataset,
            shuffle=True,
            batch_size=args.train_batch_size,
            num_workers=args.nThread)
        self.valid_loader = dataloader.DataLoader(
            self.train_dataset,
            shuffle=False,
            batch_size=args.train_batch_size,
            num_workers=args.nThread)
Example #8
    def __init__(self, args):

        train_list = [
            transforms.Resize((args.height, args.width), interpolation=3),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ]
        if args.random_erasing:
            train_list.append(
                RandomErasing(probability=args.probability,
                              mean=[0.0, 0.0, 0.0]))

        train_transform = transforms.Compose(train_list)

        test_transform = transforms.Compose([
            transforms.Resize((args.height, args.width), interpolation=3),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        if not args.test_only:
            module_train = import_module('data.' + args.data_train.lower())
            self.trainset = getattr(module_train,
                                    args.data_train)(args, train_transform,
                                                     'train')
            self.train_loader = dataloader.DataLoader(
                self.trainset,
                sampler=RandomSampler(self.trainset,
                                      args.batchid,
                                      batch_image=args.batchimage),
                #shuffle=True,
                batch_size=args.batchid * args.batchimage,
                num_workers=args.nThread)
        else:
            self.train_loader = None

        if args.data_test in [
                'Market1501', 'Market1501_folder', 'MSMT17_folder',
                'CUHK03_folder', 'SYSU30K_folder'
        ]:
            module = import_module('data.' + args.data_train.lower())
            self.testset = getattr(module,
                                   args.data_test)(args, test_transform,
                                                   'test')
            self.queryset = getattr(module,
                                    args.data_test)(args, test_transform,
                                                    'query')

        else:
            raise Exception()

        self.test_loader = dataloader.DataLoader(self.testset,
                                                 batch_size=args.batchtest,
                                                 num_workers=args.nThread)
        self.query_loader = dataloader.DataLoader(self.queryset,
                                                  batch_size=args.batchtest,
                                                  num_workers=args.nThread)
Example #9
    def __init__(self, args):
        self.loader_train = None
        if not args.test_only:
            datasets = []
            for d in args.data_train:
                module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module_name.lower())
                datasets.append(getattr(m, module_name)(args, name=d))

            self.loader_train = dataloader.DataLoader(
                MyConcatDataset(datasets),
                batch_size=args.batch_size,
                shuffle=True,
                pin_memory=not args.cpu,
                num_workers=args.n_threads,
            )

        self.loader_test = []
        for d in args.data_test:
            if d in ['SVNH', 'Set5', 'Set14', 'B100', 'Urban100']:
                m = import_module('data.benchmark')
                testset = getattr(m, 'Benchmark')(args, train=False, name=d)
            else:
                module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module_name.lower())
                testset = getattr(m, module_name)(args, train=False, name=d)
            self.loader_test.append(
                dataloader.DataLoader(
                    testset,
                    batch_size=1,
                    shuffle=False,
                    pin_memory=not args.cpu,
                    num_workers=args.n_threads,
                ))
Example #10
def load_mnist_data(valid_size=0.1, shuffle=True, random_seed=2008, batch_size=64,
                    num_workers=1):
    """
        We return train and test for plots and post-training experiments
    """
    transform = transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])

    train = MNIST('data/MNIST', train=True, download=True, transform=transform)
    test  = MNIST('data/MNIST', train=False, download=True, transform=transform)

    num_train = len(train)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))

    # Shuffle the indices before splitting so the validation fold is random
    if shuffle:
        np.random.seed(random_seed)
        np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    # Create DataLoader
    dataloader_args = dict(batch_size=batch_size,num_workers=num_workers)
    train_loader = dataloader.DataLoader(train, sampler=train_sampler, **dataloader_args)
    valid_loader = dataloader.DataLoader(train, sampler=valid_sampler, **dataloader_args)
    dataloader_args['shuffle'] = False
    test_loader = dataloader.DataLoader(test, **dataloader_args)

    return train_loader, valid_loader, test_loader, train, test
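A quick usage sketch for load_mnist_data (hedged: it assumes the imports used above — MNIST, transforms, numpy as np, SubsetRandomSampler, and dataloader — are available in the same module):

# Hedged usage sketch; the printed shapes assume standard MNIST (1x28x28 images).
train_loader, valid_loader, test_loader, train, test = load_mnist_data(
    valid_size=0.1, batch_size=128)
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([128, 1, 28, 28]) torch.Size([128])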
Example #11
def get_vqa_dataset(config):
    transform = get_image_transform()
    if 'clevr' in config.data.name:
        train_dataset = ClevrDataset(image_root_path=config.data.train_image_root_path,
                                     question_json_path=config.data.train_question_path,
                                     question_vocab_path=config.data.question_vocab_path,
                                     answer_vocab_path=config.data.answer_vocab_path,
                                     transform=transform, data_nums=config.data.train_data_nums)
        if config.data.valid_question_path is not None:
            valid_dataset = ClevrDataset(image_root_path=config.data.valid_image_root_path,
                                         question_json_path=config.data.valid_question_path,
                                         question_vocab_path=config.data.question_vocab_path,
                                         answer_vocab_path=config.data.answer_vocab_path,
                                         transform=transform, data_nums=config.data.valid_data_nums)
        else:
            # Keep the Subset objects returned by random_split so the
            # validation split is actually respected downstream
            train_size = int((1 - config.train.valid_percent) * len(train_dataset))
            val_size = len(train_dataset) - train_size
            train_dataset, valid_dataset = random_split(train_dataset, [train_size, val_size])
    else:
        raise Exception(f'Unsupported dataset {config.data.name}')
    # random_split returns Subset objects that do not expose collate_fn,
    # so fall back to the wrapped dataset's collate_fn when needed.
    train_collate = getattr(train_dataset, 'collate_fn', None) or train_dataset.dataset.collate_fn
    valid_collate = getattr(valid_dataset, 'collate_fn', None) or valid_dataset.dataset.collate_fn
    train_loader = dataloader.DataLoader(dataset=train_dataset, batch_size=config.train.batch_size,
                                         num_workers=config.train.num_workers,
                                         shuffle=config.train.shuffle, collate_fn=train_collate)
    valid_loader = dataloader.DataLoader(dataset=valid_dataset, batch_size=config.train.batch_size,
                                         num_workers=config.train.num_workers,
                                         shuffle=config.train.shuffle, collate_fn=valid_collate)
    return train_dataset, train_loader, valid_dataset, valid_loader
Example #12
    def __init__(self):
        train_transform = transforms.Compose([
            RandomE(sig=0.05),
            transforms.Resize((384, 144), interpolation=3),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            RandomErasing(probability=0.5, mean=[0.0, 0.0, 0.0])])

        test_transform = transforms.Compose([
            transforms.Resize((384, 144), interpolation=3),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

        self.trainset = Market1501(train_transform, 'train', opt.data_path)
        print('train_len:', len(self.trainset))
        self.testset = Market1501(test_transform, 'test', opt.data_path)
        print('test_len:', len(self.testset))
        self.queryset = Market1501(test_transform, 'query', opt.data_path)
        print('query_len:', len(self.queryset))
        self.testset1 = Market1501(test_transform, 'test1', opt.data_path)
        print('test_len:', len(self.testset1))
        self.queryset1 = Market1501(test_transform, 'query1', opt.data_path)
        print('query_len:', len(self.queryset1))
        self.train_loader = dataloader.DataLoader(self.trainset,
                                                  sampler=RandomSampler(self.trainset, batch_id=opt.batchid,
                                                                        batch_image=opt.batchimage),
                                                  batch_size=opt.batchid * opt.batchimage, num_workers=8,
                                                  pin_memory=True)
        self.test_loader = dataloader.DataLoader(self.testset, batch_size=opt.batchtest, num_workers=8, pin_memory=True)
        self.query_loader = dataloader.DataLoader(self.queryset, batch_size=opt.batchtest, num_workers=8, pin_memory=True)
        self.test_loader1 = dataloader.DataLoader(self.testset1, batch_size=opt.batchtest, num_workers=8, pin_memory=True)
        self.query_loader1 = dataloader.DataLoader(self.queryset1, batch_size=opt.batchtest, num_workers=8, pin_memory=True)
Example #13
def get_loaders(train_batch_size=128,
                test_batch_size=1024,
                shuffle=False,
                root='./data'):

    train_set = MNIST(root=root,
                      train=True,
                      transform=transforms.ToTensor(),
                      download=True)
    train_loader = dataloader.DataLoader(dataset=train_set,
                                         batch_size=train_batch_size,
                                         shuffle=shuffle,
                                         num_workers=8,
                                         pin_memory=True)

    test_set = MNIST(root=root,
                     train=False,
                     transform=transforms.ToTensor(),
                     download=True)
    test_loader = dataloader.DataLoader(dataset=test_set,
                                        batch_size=test_batch_size,
                                        shuffle=False,
                                        num_workers=8,
                                        pin_memory=True)
    return train_loader, test_loader
Example #14
    def __init__(self, root):
        # Create the TensorBoard log directory
        self.summaryWriter = SummaryWriter("./logs")
        # Load the training dataset
        self.train_dataset = MNISTDataset(root, True)
        # Split the 60,000 training samples into batches of 100
        self.train_dataLoader = dataloader.DataLoader(self.train_dataset,
                                                      batch_size=100,
                                                      shuffle=True)

        # Load the test dataset (train flag False)
        self.test_dataset = MNISTDataset(root, False)
        # Split the 10,000 test samples into batches of 100
        self.test_dataLoader = dataloader.DataLoader(self.test_dataset,
                                                     batch_size=100,
                                                     shuffle=True)

        # Create the model
        self.net = NetV3()
        # Optionally load saved parameters
        # self.net.load_state_dict(torch.load("./checkpoint/11.pkl"))
        self.net.cuda()

        # Create the optimizer
        self.opt = optim.Adam(self.net.parameters())
Example #15
def main():

    # Use the GPU if available, otherwise fall back to the CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = Net().to(device)

    optimizer = torch.optim.Adam(model.parameters())
    criterion = torch.nn.CrossEntropyLoss()

    train_loader = Data.DataLoader(dataset=train_data,
                                   batch_size=batch_size,
                                   shuffle=True)
    test_loader = Data.DataLoader(dataset=test_data, batch_size=batch_size)

    adversary = FGSMAttack(epsilon=0.2)

    for epoch in range(epochs):
        for t, (x, y) in enumerate(train_loader):

            x_var, y_var = to_var(x), to_var(y.long())
            loss = criterion(model(x_var), y_var)

            # adversarial training
            if epoch + 1 > delay:
                # use predicted label to prevent label leaking
                y_pred = pred_batch(x, model)
                x_adv = adv_train(x, y_pred, model, criterion, adversary)
                x_adv_var = to_var(x_adv)
                loss_adv = criterion(model(x_adv_var), y_var)
                loss = (loss + loss_adv) / 2

            if (t + 1) % 10 == 0:
                print('t = %d, loss = %.8f' % (t + 1, loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # After each epoch, measure accuracy on the test set with gradients disabled
        with torch.no_grad():
            correct = 0
            total = 0
            sum_val_loss = 0
            for data in test_loader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)

                val_loss = criterion(outputs, labels)
                sum_val_loss += val_loss.item()
                # Take the class with the highest score
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum()
            print('epoch=%d accuracy=%.02f%% val_loss=%.02f' %
                  (epoch + 1, (100 * correct / total), sum_val_loss))
            sum_val_loss = 0.0

    torch.save(model.state_dict(), './cifar-adv-pytorch/net.pth')
Example #16
def main():

    # Use the GPU if available, otherwise fall back to the CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = Net().to(device)

    optimizer = torch.optim.Adam(model.parameters())
    loss_func = torch.nn.CrossEntropyLoss()

    train_loader = Data.DataLoader(dataset=train_data,
                                   batch_size=batch_size,
                                   shuffle=True)
    test_loader = Data.DataLoader(dataset=test_data, batch_size=batch_size)

    for epoch in range(epochs):
        sum_loss = 0.0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)

            # Zero the gradients
            optimizer.zero_grad()

            # forward + backward
            outputs = model(inputs)

            loss = loss_func(outputs, labels)
            loss.backward()
            optimizer.step()

            # Print the average loss every 100 batches
            sum_loss += loss.item()
            if (i + 1) % 100 == 0:
                print('epoch=%d, batch=%d loss: %.04f' %
                      (epoch + 1, i + 1, sum_loss / 100))
                sum_loss = 0.0
        # After each epoch, measure accuracy on the test set with gradients disabled
        with torch.no_grad():
            correct = 0
            total = 0
            sum_val_loss = 0
            for data in test_loader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)

                val_loss = loss_func(outputs, labels)
                sum_val_loss += val_loss.item()
                # Take the class with the highest score
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum()
            print('epoch=%d accuracy=%.02f%% val_loss=%.02f' %
                  (epoch + 1, (100 * correct / total), sum_val_loss))
            sum_val_loss = 0.0

    torch.save(model.state_dict(), './cifar-pytorch/net.pth')
Example #17
    def __init__(self, args):
        print('[INFO] Making data...')
        #train_list = [
        #    transforms.Resize((args.height, args.width), interpolation=3),
        #    transforms.RandomHorizontalFlip(),
        #    transforms.ToTensor(),
        #    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        #]

        #train_transform = transforms.Compose(train_list)

        #test_transform = transforms.Compose([
        #    transforms.Resize((args.height, args.width), interpolation=3),
        #    transforms.ToTensor(),
        #    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        #])
        module_dataset = import_module('data.' + args.dataset.lower())
        feature_data, feature_dim = None, None  # getattr(module_dataset, 'import_feature')()

        if not args.test_only:
            self.trainset = getattr(module_dataset,
                                    args.dataset)(subset='training',
                                                  feature_data=feature_data,
                                                  feature_dim=feature_dim)
            self.train_loader = dataloader.DataLoader(
                self.trainset,
                #sampler=RandomSampler(self.trainset,args.batchid,batch_image=args.batchimage),
                shuffle=True,
                batch_size=args.batch_size,
                num_workers=args.workers)
        else:
            self.train_loader = None

        if args.dataset in ['ActivityNet']:
            module = import_module('data.' + args.dataset.lower())
            self.testset = getattr(module,
                                   args.dataset)(subset='validation',
                                                 feature_data=feature_data,
                                                 feature_dim=feature_dim)
            self.evaluateset = getattr(module,
                                       args.dataset)(subset='validation',
                                                     feature_data=feature_data,
                                                     feature_dim=feature_dim,
                                                     output_meta=True)
        else:
            raise Exception()

        self.test_loader = dataloader.DataLoader(self.testset,
                                                 batch_size=args.batch_size,
                                                 num_workers=args.workers)

        # For evaluation, sometimes we just need pseudo labels
        self.evaluate_loader = dataloader.DataLoader(
            self.evaluateset,
            batch_size=args.batch_size,
            num_workers=args.workers)

        print(len(self.trainset), len(self.testset), len(self.evaluateset))
Example #18
    def __init__(self):
        super(MNIST_optest, self).__init__()
        self.train_data = torchvision.datasets.MNIST(
            './mnist',
            train=True,
            transform=torchvision.transforms.ToTensor(),
            download=True)
        self.test_data = torchvision.datasets.MNIST(
            './mnist',
            train=False,
            transform=torchvision.transforms.ToTensor())
        print("train_data:", self.train_data.train_data.size())
        print("train_labels:", self.train_data.train_labels.size())
        print("test_data:", self.test_data.test_data.size())

        self.train_loader = Data.DataLoader(dataset=self.train_data,
                                            batch_size=64,
                                            shuffle=True)
        self.test_loader = Data.DataLoader(dataset=self.test_data,
                                           batch_size=64)
        self.batch_size = 64

        self.net = ResNet18().cuda()
        self.old_net = ResNet18().cuda()
        parameters = list(self.net.parameters())
        old_parameters = list(self.old_net.parameters())
        for i in range(len(parameters)):
            old_parameters[i] = parameters[i].detach()
        summary(self.net, input_size=(1, 28, 28))

        self.beta = 0.9
        self.beta_correction = 1

        self.kp = 0
        self.ki = 1
        self.kd = 1

        self.grad_saving = []
        self.old_grad = []

        self.optimizer = torch.optim.SGD(self.net.parameters(), 0.005)
        self.old_optimizer = torch.optim.SGD(self.old_net.parameters(), 0.005)
        self.loss_func = torch.nn.CrossEntropyLoss().cuda()
        self.grad_saving = []

        for batch_x, batch_y in self.train_loader:
            batch_x, batch_y = Variable(batch_x).cuda(), Variable(
                batch_y).cuda()
            out = self.net(batch_x)
            loss = self.loss_func(out, batch_y)
            self.optimizer.zero_grad()
            loss.backward()
            a = list(self.net.parameters())
            for i in range(len(a)):
                self.grad_saving.append(a[i].grad / 10000)
                self.old_grad.append(a[i].grad)
            break
Example #19
    def __init__(self, args):
        self.loader_train = None
        if not args.test_only:
            datasets = []
            for d in args.data_train:
                module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module_name.lower())
                datasets.append(getattr(m, module_name)(args, name=d))

            # Add a distributed training sampler, following
            # https://github.com/horovod/horovod/blob/master/examples/pytorch/pytorch_imagenet_resnet50.py
            train_dataset = MyConcatDataset(datasets)
            self.train_sampler = torchDis.DistributedSampler(
                # the concatenated training datasets
                train_dataset,

                # number of processes participating in distributed training
                num_replicas=hvd.size(),

                # rank of the current process within num_replicas
                rank=hvd.rank())

            self.loader_train = dataloader.DataLoader(

                # the same concatenated dataset the sampler indexes
                train_dataset,

                # how many samples per batch to load
                batch_size=args.batch_size,

                # shuffling must be disabled when a sampler is supplied
                shuffle=False,
                pin_memory=not args.cpu,

                # worker processes used for data loading
                num_workers=args.n_threads,

                # the distributed sampler defined above
                sampler=self.train_sampler)

        self.loader_test = []
        for d in args.data_test:
            if d in ['Set5', 'Set14', 'B100', 'Urban100']:
                m = import_module('data.benchmark')
                testset = getattr(m, 'Benchmark')(args, train=False, name=d)
            else:
                module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module_name.lower())
                testset = getattr(m, module_name)(args, train=False, name=d)

            self.loader_test.append(
                dataloader.DataLoader(
                    testset,
                    batch_size=1,
                    shuffle=False,
                    pin_memory=not args.cpu,
                    num_workers=args.n_threads,
                ))
Example #20
def __build_loader(data, batch_size=64):
    loader = {}
    loader['train'] = dataloader.DataLoader(data['train'],
                                            batch_size=batch_size,
                                            shuffle=True)

    loader['test'] = dataloader.DataLoader(data['test'], batch_size=batch_size)

    return loader
Example #21
    def __init__(self,
                 data="veri",
                 size=(288, 288),
                 sampler="triple",
                 mean=[0.485, 0.456, 0.406],
                 std=[0.229, 0.224, 0.225]):
        train_transform = transforms.Compose([
            transforms.Resize(size, interpolation=3),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
            RandomErasing(probability=0.5, mean=mean)
        ])

        test_transform = transforms.Compose([
            transforms.Resize(size, interpolation=3),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])
        Dataset = {
            'veri': VeRi,
            "vehicleid": VehicleID,
            'market': Market1501,
            'msmt': MSMT17
        }

        self.trainset = Dataset[data](train_transform, 'train', opt.data_path)
        self.testset = Dataset[data](test_transform, 'test', opt.data_path)
        self.queryset = Dataset[data](test_transform, 'query', opt.data_path)
        self.nums = self.trainset.len

        if sampler is None:
            self.train_loader = dataloader.DataLoader(self.trainset,
                                                      batch_size=opt.batchsize,
                                                      shuffle=True,
                                                      num_workers=4,
                                                      pin_memory=True)
        elif sampler == "triple":
            self.train_loader = dataloader.DataLoader(
                self.trainset,
                sampler=RandomSampler(self.trainset,
                                      batch_id=opt.batchid,
                                      batch_image=opt.batchimage),
                batch_size=opt.batchid * opt.batchimage,
                num_workers=4,
                pin_memory=True)
        self.test_loader = dataloader.DataLoader(self.testset,
                                                 batch_size=opt.batchtest,
                                                 shuffle=False,
                                                 num_workers=4,
                                                 pin_memory=True)
        self.query_loader = dataloader.DataLoader(self.queryset,
                                                  batch_size=opt.batchtest,
                                                  shuffle=False,
                                                  num_workers=4,
                                                  pin_memory=True)
Example #22
def prepare_data_loader():
    print("train_data:", train_data.train_data.size())
    print("train_labels:", train_data.train_labels.size())
    print("test_data:", test_data.test_data.size())

    train_loader = Data.DataLoader(dataset=train_data,
                                   batch_size=64,
                                   shuffle=True)
    test_loader = Data.DataLoader(dataset=test_data, batch_size=64)

    return train_loader, test_loader
Example #23
def combine_datasources(dset, dset_extra, valid_size=0, shuffle=True, random_seed=2019,
                        maxsize=None, device='cpu'):
    """ Combine two datasets.

    Extends a dataloader with additional data from another dataset. Note that the
    examples in dset_extra are added only to the train split (never to validation).

    Arguments:
        dset (DataLoader): first dataloader
        dset_extra (DataLoader): additional dataloader
        valid_size (float): fraction of data use for validation fold
        shuffle (bool): whether to shuffle the train data
        random_seed (int): random seed
        maxsize (int): maximum number of examples in either train or validation loader
        device (str): device for data loading

    Returns:
        train_loader_ext (DataLoader): train dataloader for combined data sources
        valid_loader_ext (DataLoader): validation dataloader for combined data sources

    """
    if shuffle and random_seed:
        np.random.seed(random_seed)

    ## Convert both to TensorDataset
    if isinstance(dset, torch.utils.data.DataLoader):
        dataloader_args = {k:getattr(dset, k) for k in ['batch_size', 'num_workers']}
        X, Y = load_full_dataset(dset, targets=True, device=device)
        d = int(np.sqrt(X.shape[1]))
        X = X.reshape(-1, 1, d, d)
        dset = torch.utils.data.TensorDataset(X, Y)
        logger.info(f'Main data size. X: {X.shape}, Y: {Y.shape}')
    elif isinstance(dset, torch.utils.data.Dataset):
        raise NotImplementedError('Error: combine_datasources cannot take Datasets yet.')

    merged_dset = torch.utils.data.ConcatDataset([dset, dset_extra])
    train_idx, valid_idx = random_index_split(len(dset), 1-valid_size, (maxsize, None)) # No maxsize for validation
    train_idx = np.concatenate([train_idx, np.arange(len(dset_extra)) + len(dset)])

    if shuffle:
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)
    else:
        train_sampler = SubsetSampler(train_idx)
        valid_sampler = SubsetSampler(valid_idx)

    train_loader_ext = dataloader.DataLoader(merged_dset, sampler=train_sampler, **dataloader_args)
    valid_loader_ext = dataloader.DataLoader(merged_dset, sampler=valid_sampler, **dataloader_args)

    logger.info(f'Fold Sizes: {len(train_idx)}/{len(valid_idx)} (train/valid)')

    return train_loader_ext, valid_loader_ext
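A hedged call sketch for combine_datasources; `mnist_loader` and `extra_dataset` are placeholder names, and the helpers referenced above (load_full_dataset, random_index_split, SubsetSampler) are assumed to be importable in this module:

# Hedged usage sketch: extend an existing DataLoader with an extra dataset.
train_loader_ext, valid_loader_ext = combine_datasources(
    mnist_loader, extra_dataset, valid_size=0.1, shuffle=True, device='cpu')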
Example #24
def train():
    dataset = subDataset()

    # Create the DataLoader iterators
    dataloader = DataLoader.DataLoader(
        dataset,
        batch_size=16,
        shuffle=True,
        num_workers=1,
    )
    valloader = DataLoader.DataLoader(
        dataset,
        batch_size=32,
        shuffle=True,
        num_workers=1,
    )
    net = switchNet()
    net.train()
    lr = 0.001  # learning rate
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), weight_decay=1e-5, lr=lr)
    epoch = 2  # number of epochs (one epoch = one full pass over the data)
    for e in range(epoch):
        for i, data in enumerate(dataloader):
            input = data[0]
            label = data[1]
            input = torch.tensor(input, dtype=torch.float32).permute(
                (0, 3, 1, 2))

            label = torch.tensor(label)
            optimizer.zero_grad()
            out = net(input)
            _, predicted = torch.max(out.data, 1)
            correct = (predicted == label.long()).sum()
            loss = criterion(out, label.long())

            loss.backward()
            optimizer.step()
            for i, data in enumerate(valloader):
                input = data[0]
                label = data[1]
                input = torch.tensor(input, dtype=torch.float32).permute(
                    (0, 3, 1, 2))

                label = torch.tensor(label)

                out = net(input)
                _, predicted = torch.max(out.data, 1)
                correct = (predicted == label.long()).sum()
                print(correct.item())
                break
    torch.save(net.state_dict(), "./left_switch_net.pth")  # save the model
Example #25
    def __init__(self, args):
        train_transform_list = [
            transforms.Resize((args.height, args.width), interpolation=3),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ]

        if args.random_erasing:
            train_transform_list.append(
                RandomErasing(probability=0.5, mean=[0.0, 0.0, 0.0]))

        train_transform = transforms.Compose(train_transform_list)

        test_transform = transforms.Compose([
            transforms.Resize((args.height, args.width), interpolation=3),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        # for market1501 dataset
        if not args.test:
            self.trainset = market1501.Market1501(args, train_transform,
                                                  'train')
            self.train_loader = dataloader.DataLoader(
                self.trainset,
                batch_size=args.batchsize,
                sampler=RandomSampler(self.trainset, args.batchid,
                                      args.batchsize // args.batchid),
                num_workers=args.worker,
                drop_last=False,
                pin_memory=True)
        else:
            self.trainset = None
            self.train_loader = None

        self.testset = market1501.Market1501(args, test_transform, 'test')
        self.queryset = market1501.Market1501(args, test_transform, 'query')

        self.test_loader = dataloader.DataLoader(self.testset,
                                                 batch_size=args.batchtest,
                                                 num_workers=args.worker,
                                                 drop_last=False,
                                                 pin_memory=True)
        self.query_loader = dataloader.DataLoader(self.queryset,
                                                  batch_size=args.batchtest,
                                                  num_workers=args.worker,
                                                  drop_last=False,
                                                  pin_memory=True)
Example #26
    def __init__(self, args):
        self.loader_train = None

        # Go here if want to train
        if not args.test_only:
            datasets = []
            for d in args.data_train:
                module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module_name.lower())
                datasets.append(getattr(m, module_name)(args, name=d))

            self.loader_train = dataloader.DataLoader(
                MyConcatDataset(datasets),
                batch_size=args.batch_size,
                shuffle=True,
                pin_memory=not args.cpu,
                num_workers=args.n_threads,
            )

        self.loader_test = []
        for d in args.data_test:
            if d in ['Set5', 'Set14', 'B100', 'Urban100']:
                m = import_module('data.benchmark')
                testset = getattr(m, 'Benchmark')(args, train=False, name=d)
            else:

                # Go here when data_test == 'Demo'
                module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'

                # Import the module that corresponds to d from the data package,
                # e.g. d = 'Demo' --> data/demo.py; that module can in turn use
                # other files in the directory such as common.py or div2k.py
                m = import_module('data.' + module_name.lower())

                # Instantiate the dataset class (here data.demo.Demo) with args,
                # which carries idx_scale and the other options
                testset = getattr(m, module_name)(args, train=False, name=d)

            self.loader_test.append(
                # Wrap the test set in PyTorch's built-in DataLoader
                dataloader.DataLoader(
                    testset,
                    batch_size=1,
                    shuffle=False,
                    pin_memory=not args.cpu,
                    num_workers=args.n_threads,
                )
            )
Example #27
def load_cifar_data(valid_size=0.1,
                    shuffle=True,
                    resize=None,
                    random_seed=2008,
                    batch_size=64,
                    num_workers=1):
    """
        We return train and test for plots and post-training experiments
    """
    transf_seq = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ]
    if resize and (resize[0] != 32 or resize[1] != 32):
        transf_seq.insert(0, transforms.Resize(resize))

    transform = transforms.Compose(transf_seq)
    # normalized according to pytorch torchvision guidelines https://chsasank.github.io/vision/models.html
    train = CIFAR10('data/CIFAR',
                    train=True,
                    download=True,
                    transform=transform)
    test = CIFAR10('data/CIFAR',
                   train=False,
                   download=True,
                   transform=transform)

    num_train = len(train)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))

    # Shuffle the indices before splitting so the validation fold is random
    if shuffle:
        np.random.seed(random_seed)
        np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    # Create DataLoader
    dataloader_args = dict(batch_size=batch_size, num_workers=num_workers)
    train_loader = dataloader.DataLoader(train,
                                         sampler=train_sampler,
                                         **dataloader_args)
    valid_loader = dataloader.DataLoader(train,
                                         sampler=valid_sampler,
                                         **dataloader_args)
    dataloader_args['shuffle'] = False
    test_loader = dataloader.DataLoader(test, **dataloader_args)

    return train_loader, valid_loader, test_loader, train, test
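A short usage sketch for load_cifar_data showing the optional resize path (hedged: it assumes the same module-level imports as above):

# Hedged usage sketch: CIFAR-10 resized to 224x224, e.g. for an ImageNet-pretrained backbone.
train_loader, valid_loader, test_loader, train, test = load_cifar_data(
    valid_size=0.1, resize=(224, 224), batch_size=64)
images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([64, 3, 224, 224])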
Example #28
    def __init__(self, args):

        transform = {
            'train':
            transforms.Compose([
                transforms.Resize((args.height, args.width), interpolation=3),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ]),
            'test':
            transforms.Compose([
                transforms.Resize((args.height, args.width), interpolation=3),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ]),
            'query':
            transforms.Compose([
                transforms.Resize((args.height, args.width), interpolation=3),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        }

        self.transforms = transform

        self.dataset = {
            name: Dataset(args, transform[name], name)
            for name in ['train', 'test', 'query']
        }

        self.dataloader = {}
        self.dataloader['train'] = dataloader.DataLoader(
            self.dataset['train'],
            shuffle=True,
            batch_size=args.batchtrain,
            num_workers=args.nThread)
        self.dataloader['test'] = dataloader.DataLoader(
            self.dataset['test'],
            shuffle=False,
            batch_size=args.batchtest,
            num_workers=args.nThread)
        self.dataloader['query'] = dataloader.DataLoader(
            self.dataset['query'],
            shuffle=False,
            batch_size=args.batchtest,
            num_workers=args.nThread)
Example #29
def loadData(train, test):
    """Load the dataset from memory."""

    train_loader = dataloader.DataLoader(train,
                                         batch_size=16,
                                         shuffle=True,
                                         num_workers=2)

    test_loader = dataloader.DataLoader(test,
                                        batch_size=16,
                                        shuffle=False,
                                        num_workers=2)

    return train_loader, test_loader
Example #30
def makeDataLoader(config):
    # setup data_set, data_process instances
    train_set = config.init_obj('train_set', module_data_process)
    valid_set = config.init_obj('valid_set', module_data_process)
    test_set = config.init_obj('test_set', module_data_process)

    # print('train num:{}\t valid num:{}\t test num:{}'.format(len(train_set),len(valid_set),len(test_set)))
    train_dataloader = module_dataloader.DataLoader(train_set, batch_size=train_set.batch_size,
                                                    num_workers=train_set.num_workers, collate_fn=train_set.collate_fn)
    valid_dataloader = module_dataloader.DataLoader(valid_set, batch_size=valid_set.batch_size,
                                                    num_workers=valid_set.num_workers, collate_fn=valid_set.collate_fn)
    test_dataloader = module_dataloader.DataLoader(test_set, batch_size=test_set.batch_size,
                                                    num_workers=test_set.num_workers, collate_fn=test_set.collate_fn)

    return train_dataloader, valid_dataloader, test_dataloader