Esempio n. 1
0
def noise(outlier_classes: List[int],
          generated_noise: torch.Tensor,
          norm: torch.Tensor,
          nom_class: int,
          train_set: Dataset,
          gt: bool = False) -> Dataset:
    """
    Builds a training set whose data is the nominal images followed by the
    generated noise images, labelled nominal / first-outlier respectively.
    :param outlier_classes: list of all outlier class indices; only the first
        entry is used as the label of the artificial anomalies.
    :param generated_noise: noise images (n x c x h x w), possibly Outlier
        Exposure based.
    :param norm: nominal images (n x c x h x w).
    :param nom_class: index of the class that is considered nominal.
    :param train_set: training dataset whose data/targets are overwritten.
    :param gt: ground-truth maps are not available for pure noise; True raises.
    :return: the given dataset, modified in place, with nominal samples
        followed by artificial anomalies.
    """
    if gt:
        raise ValueError('No GT mode for pure noise available!')
    # The noise images become the anomalous half of the training data,
    # clipped into the valid uint8 pixel range first.
    anomalies = generated_noise.clamp(0, 255).byte()
    nominal_count, anomalous_count = norm.size(0), anomalies.size(0)
    train_set.data = torch.cat((norm, anomalies))
    train_set.targets = torch.cat(
        (torch.ones(nominal_count) * nom_class,
         torch.ones(anomalous_count) * outlier_classes[0]))
    return train_set
Esempio n. 2
0
def malformed_normal(outlier_classes: List[int],
                     generated_noise: torch.Tensor,
                     norm: torch.Tensor,
                     nom_class: int,
                     train_set: Dataset,
                     gt: bool = False,
                     brightness_threshold: float = 0.11 * 255) -> Dataset:
    """
    Creates a dataset based on the nominal classes of a given dataset and generated noise anomalies.
    Unlike above, the noise images are not directly utilized as anomalies, but added to nominal samples to
    create malformed normal anomalies.
    :param outlier_classes: a list of all outlier class indices; only the first
        entry is used as the label of the artificial anomalies.
    :param generated_noise: torch tensor of noise images (might also be Outlier Exposure based noise) (n x c x h x w).
    :param norm: torch tensor of nominal images (n x c x h x w); assumes uint8
        pixel values in [0, 255] -- TODO confirm against callers.
    :param nom_class: the index of the class that is considered nominal.
    :param train_set: some training dataset; its data/targets are overwritten.
    :param gt: whether to provide ground-truth maps as well.
    :param brightness_threshold: if the average brightness (averaged over color channels) of a pixel exceeds this
        threshold, the noise image's pixel value is subtracted instead of added.
        This avoids adding brightness values to bright pixels, where approximately no effect is achieved at all.
    :return: a modified dataset, with training data consisting of nominal samples and artificial anomalies.
        NOTE(review): when gt is True the actual return value is the tuple
        (train_set, gtmaps), not a bare Dataset as the annotation suggests.
    """
    # Noise must be pixel-aligned with the nominal images; 3-dim inputs are
    # accepted as images without a channel dimension.
    assert (norm.dim() == 4
            or norm.dim() == 3) and generated_noise.shape == norm.shape
    norm_dim = norm.dim()
    if norm_dim == 3:
        norm, generated_noise = norm.unsqueeze(1), generated_noise.unsqueeze(
            1)  # 3-dim input is assumed to lack the channel dim; add it
    anom = norm.clone()

    # invert noise for bright regions (bright regions are considered being on average > brightness_threshold)
    # int() so the sign flip below and the addition do not wrap around uint8.
    generated_noise = generated_noise.int()
    # Per-pixel channel sum compared against threshold * #channels, i.e. the
    # mean brightness over channels exceeds the threshold.
    bright_regions = norm.sum(1) > brightness_threshold * norm.shape[1]
    for ch in range(norm.shape[1]):
        gnch = generated_noise[:, ch]
        gnch[bright_regions] = gnch[bright_regions] * -1
        generated_noise[:, ch] = gnch

    # Add (or subtract, where flipped) the noise and clip back to uint8 range.
    anom = (anom.int() + generated_noise).clamp(0, 255).byte()
    data = torch.cat((norm, anom))
    targets = torch.cat((torch.ones(norm.size(0)) * nom_class,
                         torch.ones(anom.size(0)) * outlier_classes[0]))
    if norm_dim == 3:
        # Restore the original channel-less layout for 3-dim inputs.
        data = data.squeeze(1)
    train_set.data = data
    train_set.targets = targets
    if gt:
        # Ground truth: all-zero maps for nominal images, and a 1 wherever the
        # malformed image differs from its nominal source (max over channels).
        gtmaps = torch.cat((
            torch.zeros_like(norm)[:, 0].float(),  # 0 for nominal
            (norm != anom).max(1)[0].clone().float())  # 1 for anomalous
                           )
        if norm_dim == 4:
            # Re-add a channel dimension so the maps match 4-dim inputs.
            gtmaps = gtmaps.unsqueeze(1)
        return train_set, gtmaps
    else:
        return train_set
Esempio n. 3
0
    def __init__(self,
                 batch_size: int = 128,
                 resize: Optional[Tuple[int, int]] = None):
        """Initialize the data module.

        :param batch_size: number of samples per batch.
        :param resize: optional target size -- presumably (height, width);
            TODO confirm against where self._resize is consumed.
        """
        super().__init__()
        self._batch_size = batch_size
        self._resize = resize

        # Placeholder base Dataset instances; presumably replaced with real
        # datasets in a setup step elsewhere -- confirm against subclasses.
        self.train_dataset = Dataset()
        self.val_dataset = Dataset()
        self.test_dataset = Dataset()
Esempio n. 4
0
 def __init__(self, data_path):
     """Load a BVH motion file plus its side-car phase file.

     :param data_path: path to a '.bvh' file; a matching file with 'bvh'
         replaced by 'phase' in the path must also exist.

     Relies on the module-level names BVH, num_of_root_infos and phase_scale
     defined elsewhere in the file.
     """
     Dataset.__init__(self)
     bvh = BVH()
     bvh.load(data_path)
     self.bvh = bvh
     # NOTE(review): str.replace substitutes the first 'bvh' occurrence
     # anywhere in the path, not just the extension -- confirm paths never
     # contain 'bvh' elsewhere.
     self.phases = np.loadtxt(data_path.replace('bvh', 'phase'))
     # The first num_of_root_infos motion channels belong to the root joint.
     self.root_motions = bvh.motions[:, :num_of_root_infos]
     # Frame-to-frame phase difference, scaled by the global phase_scale.
     self.phase_deltas = self.phases[1:] - self.phases[:-1]
     self.phase_deltas *= phase_scale
     # Debug output left in by the author.
     print(self.phase_deltas[200:300])
Esempio n. 5
0
 def __read_video_with_lintel(self, sample_name, indices=None):
     """Decode frames of '<sample_name>_rgb.avi' with lintel.

     :param sample_name: basename of the clip inside self.rgb_directory.
     :param indices: frame numbers to decode; when falsy, a random-seek
         decode of the whole clip is performed instead.
     :return: stacked tensor of the transformed frames (exact shape depends
         on self.image_transforms).
     """
     path = self.rgb_directory + '/' + sample_name + '_rgb.avi'
     # Context manager guarantees the handle is closed even if decoding
     # below raises (the original closed it manually and leaked it on error).
     with open(path, 'rb') as fin:
         video = fin.read()
     # Local record of the fixed clip geometry; note this intentionally
     # shadows any module-level Dataset name inside this method.
     Dataset = namedtuple('Dataset', 'width height num_frames')
     dataset = Dataset(1920, 1080, None)
     if indices:
         video = lintel.loadvid_frame_nums(video,
                                           frame_nums=indices,
                                           width=dataset.width,
                                           height=dataset.height)
     else:
         video, seek_distance = lintel.loadvid(video,
                                               should_random_seek=True,
                                               width=dataset.width,
                                               height=dataset.height)
     # Raw decoded bytes -> (frames, H, W, RGB) array.
     video = np.frombuffer(video, dtype=np.uint8)
     video = np.reshape(video,
                        newshape=(-1, dataset.height, dataset.width, 3))
     result = []
     if self.image_transforms:
         for i in range(len(video)):
             result.append(self.image_transforms(video[i]))
     # NOTE(review): torch.stack raises on an empty list, so this fails when
     # self.image_transforms is unset -- presumably transforms are mandatory.
     return torch.stack(result)
Esempio n. 6
0
    def __init__(self,
                 data_root,
                 transform=None):
        """Initialize the dataset from a directory of samples.

        :data_root: path (str) to an existing dataset root directory.
        :transform: callable applied to samples; required despite the None
            default (an AssertionError is raised when omitted).

        """
        Dataset.__init__(self)

        assert isinstance(data_root, str)
        # Resolve once instead of twice: the original called
        # Path(data_root).resolve() separately for the existence check and
        # for the stored attribute.
        root = Path(data_root).resolve()
        assert root.exists()
        self._data_root = root
        assert transform is not None
        self._transform = transform
Esempio n. 7
0
    def __init__(self, data_path):
        """Load a BVH file plus its side-car phase file and build normalized
        phase-delta, trajectory and joint-angle features.

        :param data_path: path to a '.bvh' file; the phase file is located by
            substituting 'phase' for 'bvh' in the path.

        Relies on the module-level names BVH, start_index, num_of_root_infos,
        phase_scale, trajectory_scale and angles_scale defined elsewhere.
        """
        Dataset.__init__(self)
        bvh = BVH()
        bvh.load(data_path)
        self.bvh = bvh
        self.phases = np.loadtxt(data_path.replace('bvh',
                                                   'phase'))[start_index:]
        # Phase appears to be cyclic: negative deltas are treated as
        # wrap-arounds and shifted up by one full cycle.
        self.phase_deltas = (self.phases[1:] - self.phases[:-1])
        self.phase_deltas[self.phase_deltas < 0] += 1
        # Standardize the deltas (zero mean, unit variance), then scale.
        self.phase_deltas_mean = np.mean(self.phase_deltas, axis=0)
        self.phase_deltas_std = np.std(self.phase_deltas, axis=0)
        # NOTE(review): unlike the angle features below, this division has no
        # zero-std guard.
        self.phase_deltas = (self.phase_deltas -
                             self.phase_deltas_mean) / self.phase_deltas_std
        self.phase_deltas *= phase_scale

        self.root_motions = bvh.motions[start_index:, :num_of_root_infos]
        # Columns 0, 2, 4 of the root channels -- presumably translation
        # components; confirm against the BVH channel layout.
        self.trajectories = self.root_motions[:, [0, 2, 4]]
        self.trajectories = self.trajectories[1:] - self.trajectories[:-1]

        # Debug output left in by the author.
        print(self.root_motions[:-1, [4]].shape)
        # Append the absolute value of channel 4 to the per-frame deltas
        # ([:-1] keeps the lengths aligned with the delta rows).
        self.trajectories = np.concatenate(
            [self.trajectories, self.root_motions[:-1, [4]]], axis=1)
        self.trajectory_mean = np.mean(self.trajectories, axis=0)
        self.trajectory_std = np.std(self.trajectories, axis=0)
        # NOTE(review): also no zero-std guard here.
        self.trajectories = (self.trajectories -
                             self.trajectory_mean) / self.trajectory_std
        self.trajectories *= trajectory_scale

        # Joint angles and their frame-to-frame deltas.
        self.angles = self.bvh.motion_angles[start_index:]
        self.angles_mean = np.mean(self.angles, axis=0)
        self.angles_std = np.std(self.angles, axis=0)
        # print(self.angles_mean.shape)
        # print(self.angles_std)
        # print(self.angles)
        print(self.angles.shape)
        self.angles_delta = self.angles[1:] - self.angles[:-1]
        self.angles_delta_mean = np.mean(self.angles_delta, axis=0)
        self.angles_delta_std = np.std(self.angles_delta, axis=0)

        # (std == 0) adds 1 wherever the std is zero, preventing division by
        # zero for constant channels (which are zero after mean-subtraction).
        self.angles = (self.angles-self.angles_mean) / \
            (self.angles_std+(self.angles_std == 0))
        self.angles_delta = (self.angles_delta-self.angles_delta_mean) / \
            (self.angles_delta_std+(self.angles_delta_std == 0))

        self.angles *= angles_scale
        self.angles_delta *= angles_scale
Esempio n. 8
0
 def __init__(self, data_path):
     """Load a BVH file and its side-car phase file, and precompute
     scaled frame-to-frame root and phase deltas.

     :param data_path: path to a '.bvh' file; the phase file is found by
         substituting 'phase' for 'bvh' in the path.

     Relies on the module-level names BVH, num_of_root_infos, delta_scale
     and phase_scale defined elsewhere.
     """
     Dataset.__init__(self)
     bvh = BVH()
     bvh.load(data_path)
     self.bvh = bvh
     self.phases = np.loadtxt(data_path.replace('bvh', 'phase'))
     # Debug output left in by the author.
     print(self.phases.shape)
     # Root joint channels only.
     root_motions = bvh.motions[:, :num_of_root_infos]
     # Frame-to-frame root motion differences.
     self.root_deltas = root_motions[1:] - root_motions[:-1]
     print(self.root_deltas.shape)
     # Scale only channels 0 and 2 -- presumably the horizontal translation
     # components; confirm against the BVH channel layout.
     self.root_deltas[:, 0] *= delta_scale
     self.root_deltas[:, 2] *= delta_scale
     # Debug statistics for the six root channels.
     for i in range(6):
         items = self.root_deltas[:, i]
         print(np.max(items), np.min(items), np.mean(items))
     # Frame-to-frame phase difference, scaled by the global phase_scale.
     self.phase_deltas = self.phases[1:] - self.phases[:-1]
     self.phase_deltas *= phase_scale
     items = self.phase_deltas
     print(np.max(items), np.min(items), np.mean(items))
Esempio n. 9
0
def dataset_loader(data_path):
    """Return a DataLoader over the custom dataset at *data_path*.

    Images are resized to 224x224, converted to tensors, and normalized with
    the ImageNet statistics; batches hold one sample each and keep file order.
    """
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    loader = torch.utils.data.DataLoader(
        dataset=Dataset(data_path, preprocess),
        batch_size=1,
        shuffle=False)
    return loader
Esempio n. 10
0
    def __init__(self, files_source, transforms=None):
        """Index an ImageNet-style directory of 1000 class subfolders of
        '.npz' files.

        :param files_source: path to a directory with one subfolder per class.
        :param transforms: optional transform applied to samples elsewhere.
        :raises ValueError: if files_source is not an existing directory path,
            or if it does not contain exactly 1000 class folders.
        """
        Dataset.__init__(self)
        self.transforms = transforms

        # Validate the source BEFORE touching the filesystem with it; the
        # original only checked after os.listdir() had already been called,
        # so a bad argument raised OSError instead of the intended ValueError.
        if not (isinstance(files_source, str) and os.path.isdir(files_source)):
            raise ValueError(
                "You should provide either dir_path or paths_list!")

        # get number of classes
        self.classes = sorted(os.listdir(files_source))
        self.name_to_id = {name: i for i, name in enumerate(self.classes)}
        if len(self.classes) != 1000:
            raise ValueError(
                "There should be 1000 classes, only {} exist".format(
                    len(self.classes)))

        # get files list
        self.files = glob.glob(os.path.join(files_source, "*/*.npz"))
        out = self.conv4(out)
        out = self.relu(out)
        out = self.pool2(out)
        out = torch.flatten(out, 1)
        out = self.fc1(out)

        return out


# Run on the GPU when one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Training hyper-parameters.
batch_size = 10
learning_rate = 0.001
n_epoch = 100

# X_train/Y_train/X_test/Y_test are defined earlier in the file.
Train_data = Dataset(images=X_train, labels=Y_train)
Test_data = Dataset(images=X_test, labels=Y_test)

Trainloader = DataLoader(Train_data, batch_size=batch_size, shuffle=True)
Testloader = DataLoader(Test_data, batch_size=batch_size)

net = CNN()
net.to(device)
# Print a layer-by-layer summary for 3x150x150 inputs (presumably
# torchsummary's summary() -- confirm against the imports).
summary(net, (3, 150, 150),
        device='cuda' if torch.cuda.is_available() else 'cpu')

optimizer = optim.Adam(net.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()

# Per-epoch history, filled in by the training loop elsewhere.
train_losses = []
train_accs = []
Esempio n. 12
0

# GAN training hyper-parameters; discriminator and generator use different
# learning rates (previous values kept in the trailing comments).
batch_size = 16
learning_rateD = 0.00001 #0.000001
learning_rateG = 0.0001 #0.00001
num_epoch = 10000


# Dataset location and super-resolution settings.
root = '/content/drive/MyDrive/data_download/dataset_images/test_256' #div2k'
crop_size = 96  # crop size passed to the dataset -- presumably pixels
scale = 4  # upscaling factor

ep = []
k = 0  # step counter for the loss plot

train_dataset = Dataset(root, crop_size, scale)
train = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)


net_G = Generator()
net_D = Discriminator()

if use_gpu:  # move the networks to the GPU when one is used
    net_G.to(device)
    net_D.to(device)

# Losses live on the same device as the networks.
criterion_G = Loss_Generator().to(device) if use_gpu else Loss_Generator()
criterion_D = Loss_Discriminator().to(device) if use_gpu else Loss_Discriminator()
optimizer_G = optim.Adam(net_G.parameters(), lr=learning_rateG)
optimizer_D = optim.Adam(net_D.parameters(), lr=learning_rateD)
Esempio n. 13
0
        point_set = point_set[0:self.npoints, :]
        if self.normalize:
            point_set = pc_normalize(point_set)
        if not self.normal_channel:
            point_set = point_set[:, 0:3]
            # how does cache work???
            '''if len(self.cache) < self.cache_size:
                self.cache[idx] = (point_set, cls)'''
        #point_set = np.expand_dims(point_set, 0)
        return point_set, cls

    def __len__(self):
        """Return the number of samples, one per entry in the path list."""
        return len(self.datapath)


if __name__ == '__main__':
    # Smoke test: load one batch from the ModelNet dataset and print its shape.
    dsDir = '/home/fred/lyx/data/modelnet40_normal_resampled/'
    BATCH_SIZE = 32
    NUM_POINT = 1024
    NUM_WORKER = 4
    # Use the BATCH_SIZE constant consistently (the original hard-coded 32
    # in both calls, making the constant dead).
    trainset = Dataset(dsDir,
                       batch_size=BATCH_SIZE,
                       npoints=NUM_POINT,
                       shuffle=True,
                       modelnet10=True)
    trainloader = DataLoader(trainset,
                             batch_size=BATCH_SIZE,
                             num_workers=NUM_WORKER)

    dataiter = iter(trainloader)
    # Python 3 iterators have no .next() method; the original
    # `dataiter.next()` raises AttributeError -- use the builtin next().
    inputs, labels = next(dataiter)
    print(inputs.shape)
Esempio n. 14
0
 def __init__(self, bvh, num_of_frames):
     """Wrap an already-loaded BVH object as a dataset.

     :param bvh: a loaded BVH instance; its .motions array is cached here.
     :param num_of_frames: frames per sample -- presumably consumed by
         __len__/__getitem__ elsewhere; confirm against those methods.
     """
     Dataset.__init__(self)
     self.bvh = bvh
     self.motions = bvh.motions
     self.num_of_frames = num_of_frames
#=================================================================#

BATCHSIZE = 64
# NOTE(review): sp is unused in this snippet -- presumably a hyper-parameter
# consumed elsewhere in the file.
sp = 0.01

# Augmentation + normalization pipeline (ImageNet mean/std).
data_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomCrop(size=(224, 224), padding=(10, 10)),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.RandomRotation(degrees=15, fill=0),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

path = "shopee-product-detection-dataset/train/train/"
# NOTE(review): the same augmenting transform is applied to both splits;
# validation usually gets a deterministic transform -- confirm intended.
dataset_train = Dataset(path, data_transform, train=True)
dataset_valid = Dataset(path, data_transform, train=False)

loadertr = torch.utils.data.DataLoader(dataset_train,
                                       batch_size=BATCHSIZE,
                                       shuffle=True)
loaderval = torch.utils.data.DataLoader(dataset_valid,
                                        batch_size=BATCHSIZE,
                                        shuffle=True)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
criterion = nn.CrossEntropyLoss()

# ResNet-18 backbone with a fresh 42-way classification head.
model = models.resnet18(pretrained=True)
model.fc = nn.Linear(512, 42)
model.to(device)
# u = sin(x), v = cos(x) targets for the training inputs
# (lmap and in_data_x_* are defined earlier in the file).
out_data_u_train = lmap(math.sin, in_data_x_train)
out_data_v_train = lmap(math.cos, in_data_x_train)

# sin and cos values to test learned model against
out_data_u_test = lmap(math.sin, in_data_x_test)
out_data_v_test = lmap(math.cos, in_data_x_test)

# convert data to numpy tables
# NOTE(review): np.row_stack is deprecated in recent NumPy releases;
# np.vstack is the drop-in replacement.
in_data_train = np.row_stack(in_data_x_train)
in_data_test = np.row_stack(in_data_x_test)

# output tables pair the (u, v) = (sin, cos) columns per sample
out_data_train = np.column_stack((out_data_u_train, out_data_v_train))
out_data_test = np.column_stack((out_data_u_test, out_data_v_test))

# numpy tables to pytorch Datasets
data_train = Dataset(in_data=in_data_train, out_data=out_data_train)
data_test = Dataset(in_data=in_data_test, out_data=out_data_test)

data_loader_train = DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
# one big batch so test() sees the entire test set at once
data_loader_test = DataLoader(dataset=data_test, batch_size=len(data_test), shuffle=False)

# train and test model
losses = train(model=model, loader=data_loader_train, optimizer=optimizer, loss_fn=loss_fn, epochs=epochs)
predictions = test(model=model, loader=data_loader_test)

# average loss over the final 100 recorded steps
print("final loss:", sum(losses[-100:]) / 100)

plot_loss(losses)
plot_predictions(out_data_test, predictions)

# write model to js function
Esempio n. 17
0
def construct_full_dataset(trn, tst):
    """Merge train and test splits into one namedtuple dataset.

    Returns a Dataset(X, Y, y, nb_classes) whose arrays are the train arrays
    followed by the test arrays; nb_classes is taken from *trn*.
    """
    Dataset = namedtuple('Dataset', ['X', 'Y', 'y', 'nb_classes'])
    merged = {
        field: np.concatenate((getattr(trn, field), getattr(tst, field)))
        for field in ('X', 'Y', 'y')
    }
    return Dataset(merged['X'], merged['Y'], merged['y'], trn.nb_classes)
Esempio n. 18
0
            counter = 0
            id_map = {}

        line = line.strip()
        line = line.replace('.', ' . ')
        line = line[line.find(' ')+1:]
        # if not a question
        if line.find('?') == -1:
            task["C"] += line + '<line>'
            id_map[id] = counter
            counter += 1
        else:
            idx = line.find('?')
            tmp = line[idx+1:].split('\t')
            task["Q"] = line[:idx]
            task["A"] = tmp[1].strip()
            task["S"] = [] # Supporting facts
            for num in tmp[2].split():
                task["S"].append(id_map[int(num.strip())])
            tc = task.copy()
            tc['C'] = tc['C'].split('<line>')[:-1]
            tasks.append(tc)
    return tasks

if __name__ == '__main__':
    # Smoke test: build the dataset (20 -- presumably a task count or id;
    # confirm against Dataset's signature), pull one padded batch, then stop.
    dset_train = Dataset(20, is_train=True)
    train_loader = DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate)
    for batch_idx, data in enumerate(train_loader):
        contexts, questions, answers = data
        break  # only the first batch is needed