Example #1
def get_data():
    dataset_test = DatasetFolder(root='./patterns_test/',
                                 loader=load_audio,
                                 extensions='.wav')

    dataset = DatasetFolder(root='./patterns/',
                            loader=load_audio,
                            extensions='.wav')

    data = [torch.as_tensor(d[0]) for d in dataset]
    data = padding_tensor(data)
    targets = torch.as_tensor(dataset.targets)
    tensor_dataset = TensorDataset(targets, data)
    dataset_size = int(len(tensor_dataset) * 0.55)
    indices = list(range(dataset_size))
    np.random.shuffle(indices)
    # Test split (built from the separate ./patterns_test/ folder)
    data_test = [torch.as_tensor(d[0]) for d in dataset_test]
    data_test = padding_tensor(data_test)
    targets_test = torch.as_tensor(dataset_test.targets)
    tensor_dataset_test = TensorDataset(targets_test, data_test)
    dataset_size_test = int(len(tensor_dataset_test))
    indices_test = list(range(dataset_size_test))
    np.random.shuffle(indices_test)
    ####
    train_sampler = SubsetRandomSampler(indices)
    test_sampler = SubsetRandomSampler(indices_test)
    trainloader = DataLoader(tensor_dataset,
                             sampler=train_sampler,
                             batch_size=128)
    testloader = DataLoader(tensor_dataset_test,
                            sampler=test_sampler,
                            batch_size=128)

    return trainloader, testloader
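Examples #1 and #6 assume a load_audio loader and a padding_tensor helper that are not shown. A minimal sketch of what they might look like, assuming torchaudio and waveforms of varying length but a fixed channel count (names and shapes here are assumptions, not taken from the source):

# Hypothetical helpers assumed by Examples #1 and #6; not part of the original snippets.
import torch
import torch.nn.functional as F
import torchaudio

def load_audio(path):
    # DatasetFolder passes the file path to the loader; return the waveform only.
    waveform, sample_rate = torchaudio.load(path)
    return waveform

def padding_tensor(tensors):
    # Zero-pad a list of (channels, frames) tensors to the longest length and stack them.
    max_len = max(t.shape[-1] for t in tensors)
    padded = [F.pad(t, (0, max_len - t.shape[-1])) for t in tensors]
    return torch.stack(padded)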
Example #2
def main(config):

    fix_seeds(0)  # fix the random seed for reproducibility

    train_tfm = transforms.Compose([
        # Resize the image into a fixed shape (height = width = 128)
        transforms.Resize((128, 128)),
        transforms.ToTensor(),
    ])

    test_tfm = transforms.Compose([
        transforms.Resize((128, 128)),
        transforms.ToTensor(),
    ])

    # Construct datasets.
    train_set = DatasetFolder("food-11/training/labeled",
                              loader=lambda x: Image.open(x),
                              extensions="jpg",
                              transform=train_tfm)
    valid_set = DatasetFolder("food-11/validation",
                              loader=lambda x: Image.open(x),
                              extensions="jpg",
                              transform=test_tfm)
    unlabeled_set = DatasetFolder("food-11/training/unlabeled",
                                  loader=lambda x: Image.open(x),
                                  extensions="jpg",
                                  transform=train_tfm)
    test_set = DatasetFolder("food-11/testing",
                             loader=lambda x: Image.open(x),
                             extensions="jpg",
                             transform=test_tfm)

    # Construct data loaders.
    train_loader = DataLoader(train_set,
                              batch_size=config['batch_size'],
                              shuffle=True,
                              num_workers=8,
                              pin_memory=True)
    valid_loader = DataLoader(valid_set,
                              batch_size=config['batch_size'],
                              shuffle=True,
                              num_workers=8,
                              pin_memory=True)
    test_loader = DataLoader(test_set,
                             batch_size=config['batch_size'],
                             shuffle=False)

    solver = Solver(config)

    # Train
    # solver.train(train_loader, valid_loader)

    # Test
    solver.restore_model('epoch_80.pt')  # Load best model
    pseudo_train, pseudo_label = solver.test(test_loader)
Example #3
def main(args):
    run = RunManager(args,
                     ignore=('device', 'evaluate', 'no_cuda'),
                     main='model')
    print(run)

    train_dataset = DatasetFolder('data/train',
                                  load_sample, ('.npy', ),
                                  transform=normalize_sample)
    val_dataset = DatasetFolder('data/val',
                                load_sample, ('.npy', ),
                                transform=normalize_sample)

    print(train_dataset)
    print(val_dataset)

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              num_workers=8,
                              shuffle=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            num_workers=8)

    if args.model == '1d-conv':
        model = RectCNN(282)
    else:
        model = PaperCNN()
    model = model.double().to(args.device)

    optimizer = SGD(model.parameters(), lr=1e-2)

    # evaluate(val_loader, model, args)
    best = 0
    progress = trange(1, args.epochs)
    for epoch in progress:
        progress.set_description('TRAIN [CurBestAcc={:.2%}]'.format(best))
        train(train_loader, model, optimizer, args)
        progress.set_description('EVAL [CurBestAcc={:.2%}]'.format(best))
        metrics = evaluate(val_loader, model, args)

        is_best = metrics['acc'] > best
        best = max(metrics['acc'], best)
        if is_best:
            run.save_checkpoint(
                {
                    'epoch': epoch,
                    'params': vars(args),
                    'model': model.state_dict(),
                    'optim': optimizer.state_dict(),
                    'metrics': metrics
                }, is_best)

        metrics.update({'epoch': epoch})
        run.pushLog(metrics)
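load_sample and normalize_sample are assumed here (and in Example #12) but not shown. load_sample is presumably the same kind of .npy loader used in Example #8; a minimal sketch under that assumption (a matching normalize_sample sketch follows Example #8 below):

# Hypothetical loader assumed by Examples #3 and #12.
import numpy as np
import torch

def load_sample(path):
    # DatasetFolder hands the loader a file path; return the stored array as a tensor.
    return torch.from_numpy(np.load(path))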
Example #4
    def __init__(self, *, image_dir, mask_dir):
        super().__init__(image_dir, mask_dir)

        self.image_data = DatasetFolder(root=image_dir,
                                        loader=self.image_loader,
                                        extensions=["jpg"],
                                        transform=ToTensor())

        self.mask_data = DatasetFolder(root=mask_dir,
                                       loader=self.mask_loader,
                                       extensions=["png"])

        self.length = len(self.image_data)
Example #5
    def setup(self, stage):

        # Assign train/val/test datasets for use in the dataloaders
        mednist = DatasetFolder(
            root=DATADIR.joinpath('Medical-MNIST-Classification/resized'),
            loader=self._loader,
            extensions='.jpeg')
        gen = torch.Generator().manual_seed(self.hparams.seed)

        train_n = math.ceil(len(mednist) * 0.7)
        val_n = math.floor(len(mednist) * 0.15)
        # Give the remainder to the test split so the three sizes always sum to len(mednist).
        test_n = len(mednist) - train_n - val_n

        self.train_set, self.val_set, self.test_set = random_split(
            mednist, [train_n, val_n, test_n], generator=gen)
Example #6
def get_data():
    dataset = DatasetFolder(root='./patterns/',
                            loader=load_audio,
                            extensions='.wav')

    data = [torch.as_tensor(d[0]) for d in dataset]
    data = padding_tensor(data)
    targets = torch.as_tensor(dataset.targets)
    tensor_dataset = TensorDataset(targets, data)
    valid_size = 0.2
    dataset_size = int(len(tensor_dataset))
    indices = list(range(dataset_size))
    split = int(valid_size * dataset_size)
    np.random.shuffle(indices)
    train_idx, test_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    test_sampler = SubsetRandomSampler(test_idx)
    trainloader = DataLoader(tensor_dataset,
                             sampler=train_sampler,
                             batch_size=32)
    testloader = DataLoader(tensor_dataset,
                            sampler=test_sampler,
                            batch_size=32)

    return trainloader, testloader
Example #7
    def process(self):
        if self.mode == 'dev':
            self.target = pd.read_csv(self.raw_paths[1],
                                      index_col=0,
                                      usecols=[
                                          'gdb_idx',
                                      ] +
                                      ['property_%d' % x for x in range(12)])
            self.target = self.target[['property_%d' % x for x in range(12)]]

        _dataset = DatasetFolder(root=self.raw_paths[0],
                                 loader=self.sdf_graph_reader,
                                 extensions='sdf',
                                 transform=self.pre_transform)

        data_list = []
        for alchemy_data, _ in _dataset:
            data_list.append(alchemy_data)

        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]

        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
        if self.mode == 'dev':
            torch.save(_dataset.targets, self.processed_paths[1])
Example #8
def compute_dataset_mean_std():
    load = lambda x: torch.from_numpy(np.load(x))
    dataset = DatasetFolder('data/train', load, ('.npy', ))
    loader = DataLoader(dataset, batch_size=64, num_workers=8)

    # Accumulate per-element sums on the GPU (renamed from `sum` to avoid shadowing the built-in).
    running_sum = torch.zeros_like(dataset[0][0]).to('cuda')
    ntotal = 0

    progress = tqdm(loader)
    progress.set_description('Computing MEAN')
    for x, y in progress:
        x = x.to('cuda')
        running_sum += x.sum(0)
        ntotal += y.shape[0]

    mean = (running_sum / ntotal).mean()
    print(mean)

    running_sum = 0
    progress = tqdm(loader)
    progress.set_description('Computing STD')
    for x, y in progress:
        x = x.to('cuda')
        running_sum += ((x - mean)**2).sum(0)

    std = torch.sqrt((running_sum / ntotal).mean())
    print(std)
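The statistics computed here are only printed. A plausible consumer is the normalize_sample transform that Examples #3 and #12 pass to DatasetFolder; a minimal sketch, with placeholder constants standing in for the printed values:

# Hypothetical consumer of the statistics above; the constants are placeholders, not real values.
DATA_MEAN = 0.0  # replace with the mean printed by compute_dataset_mean_std()
DATA_STD = 1.0   # replace with the std printed by compute_dataset_mean_std()

def normalize_sample(x):
    # Standardize one loaded sample with the precomputed dataset statistics.
    return (x - DATA_MEAN) / DATA_STD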
Example #9
    def __init__(self, folder, cache, min_len=11):
        dataset = DatasetFolder(folder, npy_loader, extensions=('.npy', ))
        self.total_frames = 0
        self.lengths = []
        self.arrays = []

        if cache is not None and os.path.exists(cache):
            with open(cache, 'rb') as f:
                self.arrays, self.lengths = pickle.load(f)
        else:
            for idx, (data, categ) in enumerate(
                    tqdm.tqdm(dataset,
                              desc="Counting total number of frames",
                              leave=False)):
                array_path, _ = dataset.samples[idx]
                video, _ = data
                length = len(video)
                if length >= min_len:
                    self.arrays.append((array_path, categ))
                    self.lengths.append(length)
            if cache is not None:
                with open(cache, 'wb') as f:
                    pickle.dump((self.arrays, self.lengths), f)

        self.cumsum = np.cumsum([0] + self.lengths)
        print(("Total number of frames {}".format(np.sum(self.lengths))))
Example #10
def acquire_datasets(images_dir, do_transforms):
    # Load dataset. This function is specific to the NORB dataset.

    train_dataset = DatasetFolder(images_dir + "/train_npy/",
                                  extensions="npy",
                                  loader=partial(image_loader, do_transforms))

    test_dataset = DatasetFolder(images_dir + "/test_npy/",
                                 extensions="npy",
                                 loader=partial(image_loader, do_transforms))

    # Split train dataset to train/val
    train_size = int(0.8 * len(train_dataset))
    val_size = len(train_dataset) - train_size
    train_set, val_set = random_split(train_dataset, [train_size, val_size])
    return train_set, val_set, test_dataset
Example #11
def gen_train_valid_test_split(root, by="random", ratio=(0.5, 0.2, 0.3)):
    assert by in ['random']
    seed = random.random()
    meta_data = {
        "ratio": ratio,
        "seed": seed,
    }
    random.seed(seed)
    all_data = DatasetFolder(root, loader=lambda x: x, extensions=(".mp4", ))
    # video_clips = {}
    if by == "random":
        ratio = list(int(r * len(all_data)) for r in ratio)
        ratio[0] += len(all_data) - sum(ratio)
        split = random_split(all_data, ratio)
        for spl, ds in enumerate(split):
            meta_data[f'split_{spl}'] = list(p for p, _ in ds)
        if len(split) == 2:
            meta_data['train'] = meta_data.pop('split_0')
            meta_data['test'] = meta_data.pop('split_1')
        elif len(split) == 3:
            meta_data['train'] = meta_data.pop('split_0')
            meta_data['valid'] = meta_data.pop('split_1')
            meta_data['test'] = meta_data.pop('split_2')

    with open(join(root, f"{by}_split.json"), 'w') as f:
        json.dump(meta_data, f)
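Note that random_split draws from torch's default generator, so seeding Python's random module above does not by itself make the recorded split reproducible. A reproducible variant (a sketch, not in the original) would pass an explicit generator derived from the saved seed:

        # Hypothetical reproducible variant: tie the split to the seed stored in meta_data.
        generator = torch.Generator().manual_seed(int(seed * 2**32))
        split = random_split(all_data, ratio, generator=generator)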
Example #12
def test(args):
    run = RunManager(args,
                     ignore=('device', 'evaluate', 'no_cuda'),
                     main='model')
    print(run)

    if args.model == '1d-conv':
        model = RectCNN(282)
    else:
        model = PaperCNN()
    model = model.double()

    print("Loading: {}".format(run.ckpt('best')))
    checkpoint = torch.load(run.ckpt('best'))
    model.load_state_dict(checkpoint['model'])
    model = model.to(args.device)

    test_dataset = DatasetFolder('data/test',
                                 load_sample, ('.npy', ),
                                 transform=normalize_sample)
    print(test_dataset)

    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             num_workers=8)

    test_metrics = evaluate(test_loader, model, args)
    run.writeResults(test_metrics)
Example #13
def _load_imagenet_dogs(root, formats, train_transforms, test_transforms):
    '''Load the ImageNetDogs dataset from disk'''
    from torchvision.datasets import DatasetFolder
    from torch.utils.data import random_split
    from PIL import Image
    import copy

    def loader(path):
        '''Load image from fs path'''
        return Image.open(path)

    dataset = DatasetFolder(root, loader, formats)

    size = len(dataset)
    size_train = int(0.7 * size)
    size_test = size - size_train
    dataset_train, dataset_test = random_split(dataset,
                                               [size_train, size_test])

    # set the transforms
    # we need to make a copy for the test set to have different transforms
    dataset_test.dataset = copy.copy(dataset)
    dataset_train.dataset.transform = train_transforms
    dataset_test.dataset.transform = test_transforms
    return dataset_train, dataset_test
Example #14
    def __init__(self, images_path, masks_path, image_size=224):
        self.images_path = images_path
        self.masks_path = masks_path
        self.image_size = (image_size, image_size)

        preprocess = Compose([ToTensor()])

        self.images = DatasetFolder(root=images_path,
                                    loader=self.iCoSegImageLoader,
                                    extensions=("jpg"),
                                    transform=preprocess)
        self.masks = DatasetFolder(
            root=masks_path,
            loader=self.iCoSegMaskLoader,
            extensions=("png"),
        )

        self.length = len(self.images)
Example #15
def prep_dataloader(batch_size, data_root='./food-11/'):

    # It is important to do data augmentation in training.
    # However, not every augmentation is useful.
    # Please think about what kind of augmentation is helpful for food recognition.
    train_tfm = transforms.Compose([
        # Resize the image into a fixed shape (height = width = 224)
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(p=0.5),   # randomly flip horizontally with probability 0.5
        transforms.RandomRotation(45),            # rotate by a random angle between -45 and 45 degrees
        transforms.ColorJitter(brightness=0.2, contrast=0.1, saturation=0.1, hue=0.1),  # jitter brightness, contrast, saturation and hue
        transforms.RandomGrayscale(p=0.025),      # convert to grayscale with probability 0.025 (R = G = B across the 3 channels)
        # You may add some transforms here.
        # ToTensor() converts the PIL image to a tensor and must come before Normalize().
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # mean and std (ImageNet statistics)
    ])

    # We don't need augmentations in testing and validation.
    # All we need here is to resize the PIL image and transform it into a Tensor.
    test_tfm = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # mean and std (ImageNet statistics)
    ])

    # Construct datasets.
    # The argument "loader" tells how torchvision reads the data.
    train_set_path = data_root + "training/labeled"
    valid_set_path = data_root + "validation"
    unlabeled_set_path = data_root + "training/unlabeled"
    test_set_path = data_root + "testing"
    train_set = DatasetFolder(train_set_path, loader=lambda x: Image.open(x), extensions="jpg", transform=train_tfm)
    valid_set = DatasetFolder(valid_set_path, loader=lambda x: Image.open(x), extensions="jpg", transform=test_tfm)
    unlabeled_set = DatasetFolder(unlabeled_set_path, loader=lambda x: Image.open(x), extensions="jpg", transform=train_tfm)
    test_set = DatasetFolder(test_set_path, loader=lambda x: Image.open(x), extensions="jpg", transform=test_tfm)

    # Construct data loaders.
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=2, pin_memory=True)
    valid_loader = DataLoader(valid_set, batch_size=batch_size, shuffle=True, num_workers=2, pin_memory=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

    return train_loader, valid_loader, test_loader, train_set, valid_set, unlabeled_set, test_set
Example #16
def get_audio_dataset(datafolder,
                      max_length_in_seconds=1,
                      pad_and_truncate=False):
    loader_func = partial(
        audio_loader,
        max_length_in_seconds=max_length_in_seconds,
        pad_and_truncate=pad_and_truncate,
    )
    dataset = DatasetFolder(datafolder, loader_func, ".wav")

    return dataset
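The audio_loader bound with functools.partial above is not shown. A minimal sketch of what such a loader could do with the two keyword arguments, assuming torchaudio (the name and behavior are assumptions):

# Hypothetical loader assumed by get_audio_dataset(); not part of the original snippet.
import torch.nn.functional as F
import torchaudio

def audio_loader(path, max_length_in_seconds=1, pad_and_truncate=False):
    waveform, sample_rate = torchaudio.load(path)
    if pad_and_truncate:
        max_samples = int(max_length_in_seconds * sample_rate)
        waveform = waveform[:, :max_samples]  # truncate long clips
        if waveform.shape[-1] < max_samples:  # zero-pad short clips
            waveform = F.pad(waveform, (0, max_samples - waveform.shape[-1]))
    return waveform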
Example #17
def get_dataloader(input_dir, input_ext, batch_size):
    # set target_spec_length to be some large number to avoid truncation
    audio_conf = {'use_raw_length': False, 'target_spec_length': 10000}
    def load_mel_spectrogram_and_path(path):
        y, sr = librosa.load(path, sr=16000)
        logmelspec, n_frames = compute_spectrogram(y, sr, audio_conf)
        return logmelspec, n_frames, path

    dset = DatasetFolder(input_dir, load_mel_spectrogram_and_path, (input_ext,))
    loader = DataLoader(dset, batch_size=batch_size, shuffle=False, 
                        num_workers=8, pin_memory=True)
    return loader
Example #18
    def __init__(self, root):
        self.dataset_folder = DatasetFolder(
            root=root,
            loader=FBanksTripletDataset._npy_loader,
            extensions='.npy')
        self.len_ = len(self.dataset_folder.samples)
        bin_counts = np.bincount(self.dataset_folder.targets)
        self.num_classes = len(self.dataset_folder.classes)
        self.label_to_index_range = {}
        start = 0
        for i in range(self.num_classes):
            self.label_to_index_range[i] = (start, start + bin_counts[i])
            start = start + bin_counts[i]
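The (start, end) ranges computed above work because DatasetFolder lists samples grouped by class folder in sorted order, so each class occupies a contiguous index range. A hypothetical helper (not part of the original class) that uses them might look like:

    def indices_for_class(self, label):
        # All dataset indices whose target equals `label`.
        start, end = self.label_to_index_range[label]
        return range(start, end)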
Example #19
def prepare_dataset(root_dir: Path, bands: slice):
    def is_valid_hsi_file(file_path: str):
        file_path = Path(file_path)
        return file_path.parent.name in _allowed_classes and file_path.suffix == ".npy"

    dataset = DatasetFolder(
        root=str(root_dir),
        loader=image_loader(bands),
        is_valid_file=is_valid_hsi_file,
        transform=ToTensor(),
    )

    return dataset
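image_loader(bands) is called here rather than passed directly, so it is presumably a factory that returns the actual loader. A minimal sketch under that assumption, taking the band axis to be the last one (both points are assumptions):

# Hypothetical loader factory assumed by prepare_dataset().
import numpy as np

def image_loader(bands: slice):
    def load(path: str):
        # Read a hyperspectral cube from .npy and keep only the selected bands.
        cube = np.load(path)
        return cube[..., bands]
    return load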
Example #20
def acquire_datasets(images_dir, device, do_transforms):
    # Load dataset
    dataset = DatasetFolder(images_dir,
                            extensions="npy",
                            loader=partial(image_loader, device,
                                           do_transforms))

    # Split the dataset into train/test/val
    train_size = int(0.7 * len(dataset))
    test_size = int((len(dataset) - train_size) / 2)
    val_size = len(dataset) - train_size - test_size
    train_set, test_set, val_set = random_split(
        dataset, [train_size, test_size, val_size])
    return train_set, test_set, val_set
Example #21
    def prepare_data(self):
        """Prepare and save dataset as TensorDataset to improve training speed.
        """
        self.generic_dataset = LIDCNodulesDataset(**self.dataset_params.params)
        log.info(f"DATASET SIZE: {len(self.generic_dataset)}")

        tensor_dataset_path = self.__prepare_tensor_dataset()

        self.dataset = DatasetFolder(tensor_dataset_path, torch.load, ("pt",))
        self.dataset.norm = self.generic_dataset.norm

        train_inds, val_inds, test_inds = H.train_val_holdout_split(self.dataset)
        self.train_sampler = SubsetRandomSampler(train_inds)
        self.val_sampler = SubsetRandomSampler(val_inds)
        self.test_subset = Subset(self.dataset, test_inds)
Example #22
    def prepare_data(self):
        """Prepare and save dataset as TensorDataset to improve training speed.
        """
        self.generic_dataset = LIDCNodulesDataset(**self.dataset_params.params)
        log.info(f"DATASET SIZE: {len(self.generic_dataset)}")

        self.tensor_dataset_path = self.__prepare_tensor_dataset()

        self.aug_transform = transforms.Compose(
            [T.FlipNodule3D(), T.RotNodule3D()])
        self.dataset = DatasetFolder(self.tensor_dataset_path,
                                     torch.load, ("pt"),
                                     transform=self.__data_transform)
        self.dataset.norm = self.generic_dataset.norm

        train_inds, val_inds, test_inds = H.train_val_holdout_split(
            self.dataset, ratios=[0.85, 0.14, 0.01])
        self.train_sampler = SubsetRandomSampler(train_inds)
        self.val_sampler = SubsetRandomSampler(val_inds)
        self.test_subset = Subset(self.dataset, test_inds)
Example #23
    def _fit(self, agent_type_id: int):
        trajectories = DatasetFolder(
            f'{self.experiment_folder}/trajectories/agent_{agent_type_id}/',
            extensions=['.pt'],
            loader=torch.load)
        if self.full_retrain:
            discriminator = Discriminator(self.in_channels,
                                          self.channels,
                                          self.fc_size,
                                          self.hidden_size,
                                          n_classes=self.num_pool).cuda()
            optimiser = optim.Adam(discriminator.parameters(),
                                   weight_decay=self.weight_decay)
        else:
            discriminator = self.discriminators[agent_type_id]
            optimiser = self.optimisers[agent_type_id]

        discriminator.train()

        print(
            'Training discriminator for agent_type {} with {} samples'.format(
                agent_type_id, len(trajectories)))
        training_logs = {}
        for epoch in range(self.epochs):
            mean_loss = 0
            for x, y in trajectories:
                optimiser.zero_grad()
                y_pred, _ = discriminator(x)
                loss = loss_fn(y_pred, y)
                loss.backward()
                optimiser.step()

                mean_loss += loss.item() / len(trajectories)

            training_logs.update({f'epoch_{epoch}': mean_loss})
            print('Epoch {}: {:.2f}'.format(epoch, mean_loss))

        self.discriminators[agent_type_id] = discriminator

        return training_logs
Example #24
    def __log_embeddings(self):
        dataset = DatasetFolder(self.tensor_dataset_path, torch.load, ("pt",))
        embeds, labels, imgs = [], [], []
        for sample in DataLoader(dataset, batch_size=64):
            img, label = sample[0]["nodule"], sample[0]["texture"]
            img = img[:, :, img.size(2) // 2, :, :]
            img, label = img.to(self.curr_device), label.to(self.curr_device)

            embeds.append(self.model.embed(img, labels=label))
            labels.append(label)

            clip_min, clip_max = self.dataset_params.params["ct_clip_range"]
            img_in_hu = self.generic_dataset.norm.denorm(img)
            img_in_01 = img_in_hu.add(-clip_min).div(clip_max - clip_min + 1e-5)
            imgs.append(img_in_01)

        embeds = torch.cat(embeds, dim=0)
        labels = torch.cat(labels, dim=0).tolist()
        imgs = torch.cat(imgs, dim=0)
        self.logger.experiment.add_embedding(embeds,
                                             metadata=labels,
                                             label_img=imgs,
                                             global_step=self.global_step)
Example #25
    def __init__(self, conf, inference=False):
        print(conf)

        # -----------   define model --------------- #
        build_model = PreBuildConverter(in_channels=1,
                                        out_classes=2,
                                        add_soft_max=True)
        self.models = []
        for _ in range(conf.n_models):
            self.models.append(
                build_model.get_by_str(conf.net_mode).to(conf.device))
        print('{} {} models generated'.format(conf.n_models, conf.net_mode))

        # ------------  define loaders -------------- #
        dloader_args = {
            'batch_size': conf.batch_size,
            'pin_memory': True,
            'num_workers': conf.num_workers,
            'drop_last': False,  # check that it fits in mem
            'shuffle': True
        }

        grey_loader = partial(cv2.imread, flags=cv2.IMREAD_GRAYSCALE)
        file_ext = ('.png', )
        im_trans = conf.im_transform
        self.dataset = DatasetFolder(conf.train_folder,
                                     extensions=file_ext,
                                     loader=grey_loader,
                                     transform=im_trans)
        self.train_loader = DataLoader(self.dataset, **dloader_args)

        self.test_ds = DatasetFolder(conf.test_folder,
                                     extensions=file_ext,
                                     loader=grey_loader,
                                     transform=im_trans)
        self.test_loader = DataLoader(self.test_ds, **dloader_args)

        if conf.morph_dir:
            self.morph_ds = DatasetFolder(conf.morph_dir,
                                          extensions=file_ext,
                                          loader=grey_loader,
                                          transform=im_trans)
            self.morph_loader = DataLoader(self.morph_ds, **dloader_args)
        else:
            self.morph_loader = []

        # ------------  define params -------------- #
        self.inference = inference
        if not inference:
            self.milestones = conf.milestones
            self.writer = SummaryWriter(conf.log_path)
            self.step = 0
            print('two model heads generated')

            paras_only_bn = []
            paras_wo_bn = []
            for model in self.models:
                paras_only_bn_, paras_wo_bn_ = separate_bn_paras(model)
                paras_only_bn.append(paras_only_bn_)
                paras_wo_bn.append(paras_wo_bn_)

            self.optimizer = optim.SGD([{
                'params': paras_wo_bn[model_num],
                'weight_decay': 5e-4
            } for model_num in range(conf.n_models)] +
                                       [{
                                           'params': paras_only_bn[model_num]
                                       }
                                        for model_num in range(conf.n_models)],
                                       lr=conf.lr,
                                       momentum=conf.momentum)
            print(self.optimizer)

            print('optimizers generated')
            self.board_loss_every = max(len(self.train_loader) // 5, 1)
            self.evaluate_every = conf.evaluate_every
            self.save_every = max(conf.epoch_per_save, 1)
            assert self.save_every >= self.evaluate_every
        else:
            self.threshold = conf.threshold
Example #26
def main():
    train_set = DatasetFolder("pose_data/training/labeled",
                              loader=lambda x: Image.open(x),
                              extensions="bmp",
                              transform=train_tfm)
    test_set = DatasetFolder("pose_data/testing",
                             loader=lambda x: Image.open(x),
                             extensions="bmp",
                             transform=test_tfm)

    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = ResNet_cifar10(num_classes=3, block=BasicBlock, depth=18)
    model = model.to(device)
    print(model)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()

    for epoch in range(num_epoch):
        running_loss = 0.0
        total = 0
        correct = 0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()

            outputs = model(inputs)

            loss = criterion(outputs, labels)
            loss.backward()

            optimizer.step()

            running_loss += loss.item()
            total += labels.size(0)
            _, predicted = torch.max(outputs.data, 1)
            #print(predicted)
            #print("label",labels)
            correct += (predicted == labels).sum().item()
        train_acc = correct / total

        print(
            f"[ Train | {epoch + 1:03d}/{num_epoch:03d} ] loss = {running_loss:.5f}, acc = {train_acc:.5f}"
        )

    model.eval()

    with torch.no_grad():
        correct = 0
        total = 0

        for i, data in enumerate(test_loader):
            inputs, labels = data

            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            #print(predicted)
            #print("labels:",labels)
        print('Test Accuracy:{} %'.format((correct / total) * 100))
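This script (and the similar pose/lepton/grideye scripts below) relies on module-level train_tfm, test_tfm, batch_size and num_epoch that are defined elsewhere. A minimal sketch of plausible definitions; the real values and transforms are not shown in the source, so these are illustrative only:

# Hypothetical module-level globals assumed by these scripts; values are illustrative only.
from torchvision import transforms

batch_size = 64
num_epoch = 30

train_tfm = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),  # a grayscale PIL image becomes a 1-channel tensor here
])
test_tfm = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
])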
Example #27
def main():
    train_set = DatasetFolder("./dataset/data_0705/lepton/train",
                              loader=lambda x: Image.open(x),
                              extensions="bmp",
                              transform=train_tfm)
    test_set = DatasetFolder("./dataset/data_0705/lepton/test",
                             loader=lambda x: Image.open(x),
                             extensions="bmp",
                             transform=test_tfm)

    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = models.resnet18()
    model.conv1 = nn.Conv2d(1,
                            64,
                            kernel_size=3,
                            stride=2,
                            padding=3,
                            bias=False)
    model.fc = nn.Linear(512, 3)
    model = model.to(device)
    print(model)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()

    for epoch in range(num_epoch):
        ##Training
        running_loss = 0.0
        total = 0
        correct = 0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()

            outputs = model(inputs)

            loss = criterion(outputs, labels)
            loss.backward()

            optimizer.step()

            running_loss += loss.item()
            total += labels.size(0)
            _, predicted = torch.max(outputs.data, 1)
            #print(predicted)
            #print("label",labels)
            correct += (predicted == labels).sum().item()
        train_acc = correct / total

        print(
            f"[ Train | {epoch + 1:03d}/{num_epoch:03d} ] loss = {running_loss:.5f}, acc = {train_acc:.5f}"
        )

    ##Testing
    model.eval()

    with torch.no_grad():
        correct = 0
        total = 0

        for i, data in enumerate(test_loader):
            inputs, labels = data

            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            #print(predicted)
            #print("labels:",labels)
        print('Test Accuracy:{} %'.format((correct / total) * 100))

Example #28
if __name__ == '__main__':
    root = sys.argv[1]
    name = sys.argv[2]

    IMG_EXTENSIONS = (
        '.jpg',
        '.jpeg',
        '.png',
        '.ppm',
        '.bmp',
        '.pgm',
        '.tif',
        '.tiff',
        '.webp',
    )

    dset = DatasetFolder(root, file_read, IMG_EXTENSIONS)

    with lmdb.open(f'{name}.lmdb', map_size=1024**4, readahead=False) as env:
        for i in tqdm(range(len(dset))):
            img, class_id = dset[i]
            class_byte = str(class_id).zfill(4).encode('utf-8')

            with env.begin(write=True) as txn:
                txn.put(str(i).encode('utf-8'), class_byte + img)

        with env.begin(write=True) as txn:
            txn.put(b'length', str(len(dset)).encode('utf-8'))
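The file_read loader is not shown; because the LMDB value is built as class_byte + img, it has to return the file's raw encoded bytes rather than a decoded image. A minimal sketch consistent with that:

# Hypothetical loader assumed above: return the raw bytes of the image file.
def file_read(path):
    with open(path, 'rb') as f:
        return f.read()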
Example #29
def main():
    train_set = DatasetFolder("./dataset/data_0711/grideye/train",
                              loader=lambda x: Image.open(x),
                              extensions="bmp",
                              transform=train_tfm)
    test_set = DatasetFolder("./dataset/data_0711/grideye/test",
                             loader=lambda x: Image.open(x),
                             extensions="bmp",
                             transform=test_tfm)
    val_set = DatasetFolder("./dataset/data_0711/grideye/train",
                            loader=lambda x: Image.open(x),
                            extensions="bmp",
                            transform=test_tfm)

    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=True)

    save_path = 'models.ckpt'
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Classifier().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
    criterion = nn.CrossEntropyLoss()
    best_accuracy = 0.0
    for epoch in range(num_epoch):
        model.train()  # switch back to training mode (validation below puts the model in eval mode)
        running_loss = 0.0
        total = 0
        correct = 0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            #print(labels)
            optimizer.zero_grad()

            outputs = model(inputs)
            #print(outputs.shape)

            loss = criterion(outputs, labels)
            loss.backward()

            for p in list(model.parameters()):
                if hasattr(p, 'org'):
                    p.data.copy_(p.org)

            optimizer.step()

            for p in list(model.parameters()):
                if hasattr(p, 'org'):
                    p.org.copy_(p.data.clamp_(-1, 1))

            running_loss += loss.item()
            total += labels.size(0)
            _, predicted = torch.max(outputs.data, 1)
            #print(predicted)
            #print("label",labels)
            correct += (predicted == labels).sum().item()
        train_acc = correct / total

        print(
            f"[ Train | {epoch + 1:03d}/{num_epoch:03d} ] loss = {running_loss:.5f}, acc = {train_acc:.5f}"
        )

        model.eval()
        with torch.no_grad():
            correct = 0
            total = 0
            for i, data in enumerate(val_loader):
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)
                outputs = model(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
            val_acc = correct / total

        if val_acc > best_accuracy:
            best_accuracy = val_acc
            torch.save(model.state_dict(), save_path)
            print("Save Model")

        print(
            f"[ Val | {epoch + 1:03d}/{num_epoch:03d} ]  acc = {val_acc:.5f}")

    model = Classifier().to(device)
    model.load_state_dict(torch.load(save_path))
    model.eval()
    stat = np.zeros((3, 3))
    with torch.no_grad():
        correct = 0
        total = 0
        print(model)
        for i, data in enumerate(test_loader):
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            #print(outputs.data)
            _, predicted = torch.max(outputs.data, 1)
            #print(predicted)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            for k in range(len(predicted)):
                if predicted[k] != labels[k]:
                    img = inputs[k].mul(255).byte()
                    img = img.cpu().numpy().squeeze(0)
                    img = np.moveaxis(img, 0, -1)

                    predict = predicted[k].cpu().numpy()
                    label = labels[k].cpu().numpy()
                    path = "test_result/predict:" + str(
                        predict) + "_labels:" + str(label) + ".jpg"
                    stat[int(label)][int(predict)] += 1
        ax = sns.heatmap(stat, linewidth=0.5)
        plt.xlabel('Prediction')
        plt.ylabel('Label')
        plt.savefig('heatmap.jpg')
        #print(predicted)
        #print("labels:",labels)
        print('Test Accuracy:{} %'.format((correct / total) * 100))
Example #30
def main():
	train_set = DatasetFolder("pose_data2/train", loader=lambda x: Image.open(x), extensions="bmp", transform=train_tfm)
	test_set = DatasetFolder("pose_data2/test", loader=lambda x: Image.open(x), extensions="bmp", transform=test_tfm)
	valid_set = DatasetFolder("pose_data2/val", loader=lambda x: Image.open(x), extensions="bmp", transform=test_tfm)

	train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
	test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True)
	valid_loader = DataLoader(valid_set, batch_size=batch_size, shuffle=True)

	model_path = "model.ckpt"

	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
	model = models.resnet50()
	model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
	model.fc = nn.Linear(2048, 8)
	model = model.to(device)
	print(model)
	optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
	criterion = nn.CrossEntropyLoss()

	best_acc = -1
	for epoch in range(num_epoch):
		##Training
		model.train()  # switch back to training mode (validation below puts the model in eval mode)
		running_loss = 0.0
		total = 0
		correct = 0
		for  i, data in enumerate(train_loader):
			inputs, labels = data
			inputs = inputs.to(device)
			labels = labels.to(device)
			optimizer.zero_grad()

			outputs = model(inputs)

			loss = criterion(outputs, labels)
			loss.backward()

			optimizer.step()

			running_loss += loss.item()
			total += labels.size(0)
			_,predicted = torch.max(outputs.data,1)
			#print(predicted)
			#print("label",labels)
			correct += (predicted == labels).sum().item()
		train_acc = correct / total

		print(f"[ Train | {epoch + 1:03d}/{num_epoch:03d} ] loss = {running_loss:.5f}, acc = {train_acc:.5f}")
		
		##Validation
		model.eval()

		valid_loss = 0.0
		total = 0
		correct = 0
		for i, data in enumerate(valid_loader):
			inputs, labels = data
			inputs = inputs.to(device)
			labels = labels.to(device)

			with torch.no_grad():
				outputs = model(inputs)
		
			loss = criterion(outputs, labels)
			valid_loss += loss.item()
			total += labels.size(0)
			_,predicted = torch.max(outputs.data,1)
			correct += (predicted == labels).sum().item()

		valid_acc = correct / total
		print(f"[ Valid | {epoch + 1:03d}/{num_epoch:03d} ] loss = {valid_loss:.5f}, acc = {valid_acc:.5f}")
		if valid_acc > best_acc:
			best_acc = valid_acc
			torch.save(model.state_dict(), model_path)
			print('saving model with acc {:.3f}'.format(valid_acc))



	##Testing
	model = models.resnet50()
	model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
	model.fc = nn.Linear(2048, 8)
	model = model.to(device)
	model.load_state_dict(torch.load(model_path))
	model.eval()

	with torch.no_grad():
		correct = 0
		total = 0

		for i, data in enumerate(test_loader):
			inputs, labels = data
			
			inputs = inputs.to(device)
			labels = labels.to(device)

			outputs = model(inputs)
			_,predicted = torch.max(outputs.data,1)
			
			total += labels.size(0)
			correct += (predicted == labels).sum().item()
			# for k in range(batch_size):
			# 	if predicted[k] != labels[k]:
			# 		print(inputs[k])

			#print(predicted)
			#print("labels:",labels)
		print('Test Accuracy:{} %'.format((correct / total) * 100))