Exemplo n.º 1
0
    def make_train_graph(self):
        """Build the training graph: loss (with optional L2 weight decay),
        optimizer step, summary writers, and train/val datasets.

        Returns:
            int: epoch to resume training from (0 for a fresh model).
        """
        # Train every trainable variable in the default graph.
        vars_to_train = tf.trainable_variables()

        if self.config.weight_decay > 0:
            # L2 penalty summed over all trainable variables.
            weight_norm = tf.add_n([tf.nn.l2_loss(v) for v in vars_to_train])
            self.loss += self.config.weight_decay * weight_norm
        pprint(vars_to_train)

        self.train_step = self.opt.minimize(self.loss, var_list=vars_to_train)

        # Resume from a checkpoint when a model id was given, else start fresh.
        if self.model_id != -1:
            start_epoch = self.restore()
        else:
            self.init_new_model()
            start_epoch = 0

        log_dir = os.path.join(self.model_dir, 'logs/train')
        val_dir = os.path.join(self.model_dir, 'logs/val')

        self.train_writer = tf.summary.FileWriter(log_dir, self.sess.graph)
        self.val_writer = tf.summary.FileWriter(val_dir, self.sess.graph)

        self.train_dataset = ImageDataset(
            data_dir=os.path.join(self.data_dir, 'train'),
            h=self.height,
            w=self.width,
            batch_size=self.batch_size,
            crop_proportion=0.8
        )
        # Validation mirrors training but reads from 'val' and skips the
        # random-crop augmentation — TODO(review): confirm ImageDataset's
        # expected crop_proportion for "no augmentation".
        self.val_dataset = ImageDataset(
            data_dir=os.path.join(self.data_dir, 'val'),
            h=self.height,
            w=self.width,
            batch_size=self.batch_size,
            crop_proportion=1.0
        )
        return start_epoch
Exemplo n.º 2
0
    def _split_train_valid(self, batch_size=256):
        """Split the loaded data 90/10 and build the train/valid dataloaders.

        Clears self.data / self.labels afterwards to free memory.
        """
        self.logger.log("Splitting in 90%/10% train/validation")

        X_train, X_valid, y_train, y_valid = train_test_split(
            self.data, self.labels, test_size=0.10, random_state=13)

        train_dataset = ImageDataset({'X': X_train, 'y': y_train},
                                     self.logger,
                                     transform=self.train_transform)
        valid_dataset = ImageDataset({'X': X_valid, 'y': y_valid},
                                     self.logger,
                                     transform=self.valid_transform)

        # One shuffled loader per split.
        self.dataloaders = {
            split: torch.utils.data.DataLoader(ds,
                                               batch_size=batch_size,
                                               shuffle=True)
            for split, ds in (('train', train_dataset),
                              ('valid', valid_dataset))
        }

        # Raw arrays are no longer needed once wrapped in datasets.
        self.data, self.labels = None, None
Exemplo n.º 3
0
def main():
    """Train a SegNet cloud-segmentation model and log the run to W&B."""
    # CHANGE LOSS FUNCTION TO CORRECT ONE
    wandb.init(project="cloud_segmentation")

    # Pick GPU when available, otherwise fall back to CPU.
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    print("Running on gpu" if use_cuda else "Running on cpu")

    # Hyper-parameters, mirrored into the W&B run config.
    batch_size, learning_rate, n_epochs = 2, 0.001, 2
    wandb.config.update({
        "epochs": n_epochs,
        "batch_size": batch_size,
        "learning_rate": learning_rate
    })

    # Image transforms / augmentation (False => no augmentation flag).
    transforms = utils.get_transforms(False)

    # Train / validation split of the labelled data.
    x_train, y_train, x_val, y_val = get_train_val_set(utils.TRAIN_LABELS)
    shape = (1400, 2100, 3)
    train_dataset = ImageDataset(utils.TRAIN_IMAGES, x_train, y_train,
                                 transforms, shape)
    val_dataset = ImageDataset(utils.TRAIN_IMAGES, x_val, y_val, transforms,
                               shape)

    data_loader = torch.utils.data.DataLoader(train_dataset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=4)
    data_loader_val = torch.utils.data.DataLoader(val_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=4)

    # Build the model, let W&B track it, and train.
    model = SegNet()
    wandb.watch(model, log="all")
    model.to(device)
    optimizer = optim.Adam(model.parameters(), learning_rate)

    for epoch in range(n_epochs):
        print("Epoch:", epoch)
        train_step(model, data_loader, optimizer, device)
        eval_step(model, data_loader_val, device)

    # Persist the trained weights inside the W&B run directory.
    torch.save(model.state_dict(), wandb.run.dir + "/model.pt")
Exemplo n.º 4
0
def load_from_directory(source_dir,
                        split,
                        batch_size,
                        num_repeats,
                        subsample=1,
                        memmap_directory=None):
    """Load train/test image datasets from *source_dir* and wrap them in
    IIC generators.

    Args:
        source_dir: directory containing the image files.
        split: train/test split spec passed to FilenamesDataset.split.
        batch_size: batch size for both generators.
        num_repeats: number of repeats per epoch for both generators.
        subsample: optional subsampling factor forwarded to ImageDataset.
        memmap_directory: optional directory for memory-mapped image storage.

    Returns:
        (train_pipeline, test_pipeline, num_classes).
    """
    fs = FilenamesDataset(source_dir)
    fs.split(split)
    ds_train = ImageDataset(fs.train_filenames,
                            fs.train_cls,
                            subsample=subsample,
                            memmap_directory=memmap_directory)
    ds_train.load()
    ds_test = ImageDataset(fs.test_filenames,
                           fs.test_cls,
                           subsample=subsample,
                           memmap_directory=memmap_directory)
    ds_test.load()
    tf_train = IICGenerator(ds_train.data,
                            ds_train.cls,
                            batch_size=batch_size,
                            num_repeats=num_repeats,
                            prefetch=16,
                            is_training=True)
    # BUG FIX: the test generator previously fed ds_train's data/labels,
    # so evaluation silently ran on the training set.
    tf_test = IICGenerator(ds_test.data,
                           ds_test.cls,
                           batch_size=batch_size,
                           num_repeats=num_repeats,
                           prefetch=16,
                           is_training=False)
    return tf_train.create(), tf_test.create(), fs.num_classes
Exemplo n.º 5
0
def save_ds_samples():
    """Load datasets from pickle files. Get samples from random indices and save them as JPEG images"""

    pickles = [
        "../datasets/stl10/original_train.pkl",
        "../datasets/stl10/mirror_train.pkl",
        "../datasets/stl10/rot_90_1_train.pkl",
        "../datasets/stl10/rot_90_3_train.pkl",
        "../datasets/stl10/rand_distorted_train_0.pkl",
        "../datasets/stl10/rand_distorted_train_1.pkl",
        "../datasets/stl10/rand_distorted_train_2.pkl",
    ]
    dataset = ImageDataset.load_from_pickles(pickles)
    items_per_pickle = 11000

    for _ in range(20):
        idx = randint(0, items_per_pickle - 1)
        # Pull the sample at the same offset from every augmented pickle.
        images = [
            LabeledImage.load_from_dataset(dataset,
                                           index=j * items_per_pickle + idx,
                                           max_value=1)
            for j in range(len(pickles))
        ]
        # Stitch the variants side by side and write them out.
        stitched = np.concatenate([x.image for x in images], axis=1)
        LabeledImage(stitched, images[0].name) \
            .save(location="../samples/", name="{}_{}".format(idx, images[0].name))
Exemplo n.º 6
0
def main(data_dir,
         n_classes=2,
         input_shape=(64, 64, 3),
         num_epochs=20,
         batch_size=64):
    """Train on data_dir/train* and evaluate on data_dir/validation*."""
    model = get_model(input_shape, n_classes)

    # Height/width only — the channel count is not needed by the datasets.
    img_dim = list(input_shape[:2])

    train_dataset = ImageDataset(data_dir + "/train*", num_epochs, batch_size,
                                 img_dim).get_dataset()
    optimizer = tf.train.AdamOptimizer()
    train(model, train_dataset, optimizer)

    # A single epoch over the validation files is enough for evaluation.
    test_dataset = ImageDataset(data_dir + "/validation*", 1, batch_size,
                                img_dim).get_dataset()
    evaluate(model, test_dataset)
Exemplo n.º 7
0
def main() -> None:
    """
    Main program.
    """
    ensure_reproducibility()

    # === Vector data ===
    vector_data = VectorDataset(REPROCESS_DATA)

    # Label-distribution histograms for the train and test splits; similar
    # distributions indicate the split was done well.
    graphing.plot_category_histograms(vector_data, GRAPH_HISTOGRAM_FILENAME)

    # Baseline: logistic regression.
    try_logistic_regression(vector_data)

    # Neural networks — expected to beat the baseline.
    try_neural_networks(vector_data, VECTOR_ROUND1_PARAM_DISTRIBUTIONS)

    # === Image data ===
    image_data = ImageDataset(REPROCESS_DATA)

    # Convolutional neural networks on the image data.
    try_neural_networks(image_data, IMAGE_ROUND1_PARAM_DISTRIBUTIONS)
Exemplo n.º 8
0
    def classify_images(self, batch):
        """Classify a batch of images and return their matched labels.

        Args:
            batch: input images in whatever form ImageDataset accepts.

        Returns:
            list: matched-label results for every image, in loader order.
        """
        start_time_classification = time.time()

        tensors_dataset = ImageDataset(batch)

        loader = torch.utils.data.DataLoader(tensors_dataset,
                                             batch_size=self.batch_size,
                                             num_workers=self.num_workers)
        results = []

        print("Starting classifying %d images" % len(tensors_dataset))

        with torch.no_grad():
            # FIX: the loop variable used to shadow the `batch` parameter;
            # renamed so the original argument stays accessible.
            for mini_batch in loader:
                start_time_batch = time.time()

                image_ids = mini_batch[0]
                image_tensors = mini_batch[1]

                print("Classifying a batch of %d images" % len(image_ids))

                predictions = self.model(image_tensors.to(
                    self.compute_device)).cpu()
                batch_results = self.get_matched_labels(predictions, image_ids)
                results.extend(batch_results)

                duration_batch = time.time() - start_time_batch
                print("Classified a batch of %d elements in %.2f seconds" %
                      (len(image_ids), duration_batch))

        duration_classification = time.time() - start_time_classification
        print("Finished classifying the images. Duration: %.2f" %
              duration_classification)

        return results
Exemplo n.º 9
0
    def setUp(self):
        """Build a dataloader and a CPU SSD model from the test config."""
        with open("tests/test_config.yaml") as f:
            config = yaml.safe_load(f)

        model_config = config["model_configuration"]
        training_config = config["training_configuration"]
        data_config = config["data_configuration"]

        dataset = ImageDataset(data_config=data_config,
                               transform=True,
                               mode="train")
        self.dataloader = DataLoader(dataset,
                                     batch_size=training_config["batch_size"],
                                     num_workers=0,
                                     shuffle=True)

        num_classes = len(data_config['classes'])
        aspect_ratios = data_config['aspect_ratios']

        # Backbone layers come straight from the model configuration.
        backbone = SimpleBackbone(model_config['layers'])

        self.model = SSD(backbone, aspect_ratios, num_classes).to('cpu')
Exemplo n.º 10
0
def load_eval_dataset():
    """Load Evaluation dataset from list of pickle files"""
    eval_pickles = [
        "../datasets/stl10/original_test.pkl",
    ]
    dataset = ImageDataset.load_from_pickles(eval_pickles)
    return dataset.x, dataset.y
Exemplo n.º 11
0
def load_dataset(csv_file, **kwargs):
    """
    Function to load in the Kaggle-version of the MNIST-dataset.

    Arguments:
        csv_file {string} -- location of csv-file

    Optional Arguments:
        split {float} -- Training-set split
    Returns:
        [tuple] -- (training_dataset, validation_dataset)
    """
    split = kwargs.pop('split', 0.85)

    normalize = transforms.Normalize(mean=(0.1307, ),
                                     std=(0.3081, ),
                                     inplace=True)

    # Training adds a small random rotation; validation only normalizes.
    transform_train = transforms.Compose([
        transforms.RandomRotation(15, fill=(0, )),
        transforms.ToTensor(), normalize
    ])
    transform_val = transforms.Compose([transforms.ToTensor(), normalize])

    data = pd.read_csv(csv_file).to_numpy()
    n_cols = data.shape[1]

    if n_cols == 785:
        # Labelled data: first column is the target digit, rest are pixels.
        target = data[:, 0]
        images = np.reshape(data[:, 1:], (-1, 28, 28))
        train_img, val_img, train_labels, val_labels = train_test_split(
            images, target, train_size=split)
        return (ImageDataset((train_img, train_labels), transform_train),
                ImageDataset((val_img, val_labels), transform_val))

    if n_cols == 784:
        # Unlabelled (test) data: pixels only.
        return ImageDataset(np.reshape(data, (-1, 28, 28)), transform_val)

    print('Wrong Image-size for this architecture. Expected size 28x28.')
Exemplo n.º 12
0
    def setUp(self):
        """Load the test data configuration and build the dataset under test."""
        with open("tests/test_config.yaml") as f:
            config = yaml.safe_load(f)

        self.data_config = config["data_configuration"]
        self.batch_size = 10

        self.dataset = ImageDataset(data_config=self.data_config,
                                    transform=True)
Exemplo n.º 13
0
def load_cifar10():
    """Load augmented CIFAR-10 pickles and save a stitched sanity-check image.

    Returns:
        ((x_train, y_train), (x_test, y_test)) arrays from the loaded pickles.
    """
    train_ds = ImageDataset.load_from_pickles([
        "/datasets/cifar10/original_train.pkl",
        "/datasets/cifar10/mirror_train.pkl",
        "/datasets/cifar10/rand_distorted_train.pkl",
    ])
    test_ds = ImageDataset.load_from_pickles([
        "/datasets/cifar10/original_test.pkl",
    ])

    # Pull the sample at the same offset from each concatenated pickle
    # (the pickles are laid out back to back, 50000 images apart).
    offset = 1024
    train_tuple = (train_ds.x, train_ds.y)
    samples = [
        LabeledImage.load_from_dataset_tuple(train_tuple, base + offset)
        for base in (0, 50000, 100000)
    ]

    # Stitch the three variants side by side and save for visual inspection.
    mixed_img = np.concatenate([s.image for s in samples], axis=1)
    LabeledImage(mixed_img, "mixed_" + samples[0].name).save_image()

    return (train_ds.x, train_ds.y), (test_ds.x, test_ds.y)
Exemplo n.º 14
0
def load_train_dataset():
    """Load Training dataset from list of pickle files"""
    # Disabled augmentations are kept here, commented out, for easy toggling.
    train_pickles = [
        "../datasets/stl10/original_train.pkl",
        "../datasets/stl10/mirror_train.pkl",
        # "../datasets/stl10/rot_90_1_train.pkl",
        # "../datasets/stl10/rot_90_3_train.pkl",
        "../datasets/stl10/rand_distorted_train_0.pkl",
        # "../datasets/stl10/rand_distorted_train_1.pkl",
        # "../datasets/stl10/rand_distorted_train_2.pkl",
    ]
    dataset = ImageDataset.load_from_pickles(train_pickles)
    return dataset.x, dataset.y
def main(args):
    """Run semantic-segmentation prediction and save colour masks.

    Args:
        args: parsed CLI namespace with .mode ("baseline" or "improved"),
              .input_path (image folder) and .output_path (mask folder).

    Raises:
        ValueError: if args.mode is not a recognised prediction mode.
    """
    ############### Arguments ###############
    predict_mode = args.mode
    input_folder = args.input_path
    output_folder = args.output_path
    base_model_path = "./p2/p2_vgg_baseline.pth"
    improved_model_path = "./p2/p2_vgg_improved.pth"
    batch_size = 1
    #########################################
    test_set = ImageDataset("predict", predict_img_path=input_folder)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

    if predict_mode == "baseline":
        model = FCN32s()
        save_model_path = base_model_path
    elif predict_mode == "improved":
        model = FCN8s()
        save_model_path = improved_model_path
    else:
        # BUG FIX: an unknown mode previously fell through and crashed later
        # with NameError on `model`; fail fast with a clear message instead.
        raise ValueError("Unknown predict mode: {}".format(predict_mode))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.load_state_dict(torch.load(save_model_path, map_location=device))

    # Class index -> RGB colour for the output mask.
    class_colors = [
        [0, 255, 255],
        [255, 255, 0],
        [255, 0, 255],
        [0, 255, 0],
        [0, 0, 255],
        [255, 255, 255],
        [0, 0, 0],
    ]

    with torch.no_grad():
        model.eval()
        for image_tensor, img_id in test_loader:
            # Predict the per-pixel class with an argmax over channel dim.
            image_tensor = image_tensor.to(device)
            output = model(image_tensor)
            _, pred = torch.max(output, 1)
            pred = pred[0]

            # Colourise the prediction and save it as a PNG mask.
            pred = pred.cpu().numpy()
            pred_img = np.zeros((512, 512, 3))
            for cls, color in enumerate(class_colors):
                pred_img[pred == cls] = color

            img_id = img_id[0]
            # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2;
            # if the pinned SciPy is newer, switch to imageio.imwrite.
            scipy.misc.imsave("{}/{}_mask.png".format(output_folder, img_id),
                              np.uint8(pred_img))
Exemplo n.º 16
0
def main():
    """Draw ground-truth bounding boxes on one batch and save it as image.jpg."""
    config, use_val = load_configurations()

    training_config = config["training_configuration"]
    data_config = config["data_configuration"]

    # Validation split when requested, otherwise the training split.
    mode = "val" if use_val else "train"

    dataset = ImageDataset(
        data_config=data_config,
        transform=True,
        mode=mode
    )
    dataloader = DataLoader(
        dataset,
        batch_size=training_config["batch_size"],
        num_workers=0,
        shuffle=True
    )

    # Take a single batch and convert its labels to bounding boxes.
    images, labels = next(iter(dataloader))
    bounding_boxes = convert_to_bounding_boxes(labels)

    # Overlay the boxes on every image of the batch.
    annotated = []
    for image, bbs in zip(images, bounding_boxes):
        boxes = BoundingBoxesOnImage(bbs, shape=image.shape)
        annotated.append(boxes.draw_on_image(image, size=2, color=[0, 0, 255]))

    # Stack the annotated images vertically and write them to disk.
    batch_image = cv2.vconcat(annotated)

    file_name = "image.jpg"
    cv2.imwrite(file_name, batch_image)
Exemplo n.º 17
0
def train_gan(generator, discriminator, batch_size=256, epochs=50):
    '''
        The training loop for the whole network
    '''
    loss_fun = discriminator.loss_fun()
    real_image_dataset = torchvision.datasets.MNIST(
        './data',
        download=True,
        train=True,
        transform=torchvision.transforms.ToTensor(),
    )
    training_dataset = ImageDataset(real_image_dataset, generator, batch_size)
    training_dataloader = torch.utils.data.DataLoader(
        training_dataset,
        shuffle=True,
        batch_size=batch_size,
    )

    for epoch_idx in range(epochs):
        # One pass over the mixed real/generated data per epoch.
        print('epoch {}: '.format(str(epoch_idx)))
        total_dis_acc = 0
        total_gen_loss = 0
        for mixed_inputs, mixed_labels in training_dataloader:
            # First update the discriminator on the mixed batch...
            total_dis_acc += train_discriminator(mixed_inputs, mixed_labels,
                                                 discriminator, loss_fun,
                                                 batch_size)
            # ...then train the generator against it for one batch.
            total_gen_loss += train_generator(batch_size, generator,
                                              discriminator, loss_fun)
        if len(training_dataloader) == 0:
            print('Where\'s your data you dweeb')
            exit()
        print('Average discriminator accuracy {} and generator loss {}'.format(
            float(total_dis_acc) / len(training_dataloader),
            float(total_gen_loss) / len(training_dataloader)))
    return training_dataset
Exemplo n.º 18
0
    def _load_test_data(self, path_in_data_folder):
        """Read every .png under the given data sub-folder into crt_dataset."""
        data, labels, fnames = [], [], []

        full_path = os.path.join(self.logger.data_folder, path_in_data_folder)
        for entry in os.listdir(os.fsencode(full_path)):
            filename = os.fsdecode(entry)
            if not filename.endswith(".png"):
                continue
            # Image pixels, its label from the lookup dict, and the filename.
            image = cv2.imread(
                self.logger.get_data_file(filename, path_in_data_folder))
            data.append(image)
            labels.append(self.labels_dict[filename])
            fnames.append(filename)

        self.crt_dataset = ImageDataset({'X': data, 'y': labels},
                                        self.logger,
                                        transform=self.data_transform,
                                        fnames=fnames)
        self.crt_folder = os.path.basename(path_in_data_folder)
        self.crt_size = len(fnames)
Exemplo n.º 19
0
def main():
    """Build the SSD model, dataset and trainer from the loaded configuration."""
    # Use GPU if available
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('Using {} device'.format(device))

    config = load_configurations()

    model_config = config["model_configuration"]
    training_config = config["training_configuration"]
    data_config = config["data_configuration"]
    print(config)

    dataset = ImageDataset(
        data_config=data_config,
        transform=True,
    )

    # One output class per configured label.
    num_classes = len(data_config['classes'])
    model = SSD(num_classes).to(device)

    trainer = Trainer(model, dataset, loss_function, training_config)
Exemplo n.º 20
0
                optimizer.zero_grad()
                recon = model(data)
                loss = criterion(recon, data)
                # loss = criterion(recon, data_2_imag)
                loss.backward()
                optimizer.step()
            print('Epoch:{}, Loss:{:.4f}'.format(epoch + 1, loss.item()))
            writer.writerow('Epoch:{}, Loss:{:.4f}'.format(
                epoch + 1, loss.item()))
            outputs.append((epoch, data, recon))
    return outputs


# Train on a cloud-free image from which the autoencoder should learn
# (12x12 patches extracted from the pre-landslide scene).
# dataset_train1 = ImageDataset(root_dir='./patches_inainte_alunecare', transform=transforms.ToTensor())
dataset_train1 = ImageDataset(root_dir='./patches_inainte_alunecare_12x12',
                              transform=transforms.ToTensor())

# Materialise the dataset so it behaves as a plain indexable list.
dataset_train1 = list(dataset_train1)

# dataset_train2 = ImageDataset(root_dir='./patches_alunecare', transform=transforms.ToTensor())
# dataset_train2 = list(dataset_train2)

# drop_last avoids a final partial batch at batch size 8.
train_loader1 = DataLoader(dataset_train1,
                           batch_size=8,
                           shuffle=True,
                           drop_last=True)
# train_loader2 = DataLoader(dataset_train2, batch_size=8, shuffle=True, drop_last=True)

model = CA.Autoencoder()

max_epochs = 100
Exemplo n.º 21
0
def get_dataset(name,
                sparse=True,
                feat_str="deg+ak3+reall",
                root=None,
                aug=None,
                aug_ratio=None):
    """Build a (possibly augmented) graph dataset described by *feat_str*.

    Args:
        name: dataset name ('MNIST*', 'CIFAR*', 'MNIST_SUPERPIXEL' or a
            TU dataset name).
        sparse: kept for interface compatibility; not used here.
        feat_str: string encoding the feature-expansion options, decoded
            with the regexes below.
        root: optional dataset root; defaults to ~/pyG_data/<name>.
        aug, aug_ratio: augmentation settings forwarded to TUDatasetExt.

    Returns:
        (train_dataset, test_dataset) for image datasets, otherwise a
        single TUDatasetExt instance.
    """
    if root is None or root == '':
        path = osp.join(osp.expanduser('~'), 'pyG_data', name)
    else:
        path = osp.join(root, name)

    # Decode feature-expansion flags from feat_str. FIX: raw strings so
    # "\d" / "\w" are regex classes, not invalid string escapes.
    degree = feat_str.find("deg") >= 0
    onehot_maxdeg = re.findall(r"odeg(\d+)", feat_str)
    onehot_maxdeg = int(onehot_maxdeg[0]) if onehot_maxdeg else None
    k = re.findall(r"an{0,1}k(\d+)", feat_str)
    k = int(k[0]) if k else 0
    groupd = re.findall(r"groupd(\d+)", feat_str)
    groupd = int(groupd[0]) if groupd else 0
    remove_edges = re.findall(r"re(\w+)", feat_str)
    remove_edges = remove_edges[0] if remove_edges else 'none'
    edge_noises_add = re.findall(r"randa([\d\.]+)", feat_str)
    edge_noises_add = float(edge_noises_add[0]) if edge_noises_add else 0
    edge_noises_delete = re.findall(r"randd([\d\.]+)", feat_str)
    edge_noises_delete = float(
        edge_noises_delete[0]) if edge_noises_delete else 0
    centrality = feat_str.find("cent") >= 0
    coord = feat_str.find("coord") >= 0

    pre_transform = FeatureExpander(degree=degree,
                                    onehot_maxdeg=onehot_maxdeg,
                                    AK=k,
                                    centrality=centrality,
                                    remove_edges=remove_edges,
                                    edge_noises_add=edge_noises_add,
                                    edge_noises_delete=edge_noises_delete,
                                    group_degree=groupd).transform

    print(aug, aug_ratio)
    if 'MNIST' in name or 'CIFAR' in name:
        if name == 'MNIST_SUPERPIXEL':
            train_dataset = MNISTSuperpixels(path,
                                             True,
                                             pre_transform=pre_transform,
                                             transform=T.Cartesian())
            test_dataset = MNISTSuperpixels(path,
                                            False,
                                            pre_transform=pre_transform,
                                            transform=T.Cartesian())
        else:
            train_dataset = ImageDataset(path,
                                         name,
                                         True,
                                         pre_transform=pre_transform,
                                         coord=coord,
                                         processed_file_prefix="data_%s" %
                                         feat_str)
            test_dataset = ImageDataset(path,
                                        name,
                                        False,
                                        pre_transform=pre_transform,
                                        coord=coord,
                                        processed_file_prefix="data_%s" %
                                        feat_str)
        dataset = (train_dataset, test_dataset)
    else:
        dataset = TUDatasetExt(path,
                               name,
                               pre_transform=pre_transform,
                               use_node_attr=True,
                               processed_filename="data_%s.pt" % feat_str,
                               aug=aug,
                               aug_ratio=aug_ratio)

        # Edge attributes are not used downstream; drop them.
        dataset.data.edge_attr = None

    return dataset
Exemplo n.º 22
0
    img_grid_AB = make_grid([real_A.squeeze(0), fake_B.squeeze(0)])
    img_grid_BA = make_grid([real_B.squeeze(0), fake_A.squeeze(0)])
    writer.add_image(
        '[test] real_A vs fake_B',
        np.transpose(tensor2im(img_grid_AB.unsqueeze(0)), (2, 0, 1)), step)
    writer.add_image(
        '[test] real_B vs fake_A',
        np.transpose(tensor2im(img_grid_BA.unsqueeze(0)), (2, 0, 1)), step)


if __name__ == '__main__':
    # Run on the first GPU when available, otherwise on CPU.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = Cyclegan(device)

    dataset = ImageDataset('./dataset/bart2lisa')
    dataloader = data.DataLoader(dataset)

    writer = SummaryWriter()

    # Restore pretrained networks (checkpoint index 0).
    model.load_networks(0)

    for step, sample in enumerate(dataloader):
        real_A, real_B = sample['A'], sample['B']

        # Visualise every 100th pair on tensorboard.
        if step % 100 == 0:
            fake_B, fake_A = model.forward(real_A, real_B)
            write_to_tensorboard(writer, real_A, real_B,
                                 fake_A.detach().cpu(),
                                 fake_B.detach().cpu(), step)
Exemplo n.º 23
0
def get_dataset(name, sparse=True, feat_str="deg+ak3+reall", root=None):
    """Build the graph dataset described by *feat_str*.

    Args:
        name: dataset name ('MNIST*', 'CIFAR*', 'QM9*', 'ModelNet*',
            'TOSCA*' or a TU dataset name).
        sparse: kept for interface compatibility; not used here.
        feat_str: string encoding the feature-expansion options, decoded
            with the regexes below.
        root: optional dataset root; defaults to ~/pyG_data/<name>.

    Returns:
        (train_dataset, test_dataset) for image/ModelNet datasets,
        otherwise a single dataset instance.
    """
    if root is None or root == '':
        path = osp.join(osp.expanduser('~'), 'pyG_data', name)
    else:
        path = osp.join(root, name)

    # Decode feature-expansion flags from feat_str. FIX: raw strings so
    # "\d" / "\w" are regex classes, not invalid string escapes.
    degree = feat_str.find("deg") >= 0
    onehot_maxdeg = re.findall(r"odeg(\d+)", feat_str)
    onehot_maxdeg = int(onehot_maxdeg[0]) if onehot_maxdeg else None
    k = re.findall(r"an{0,1}k(\d+)", feat_str)
    k = int(k[0]) if k else 0
    groupd = re.findall(r"groupd(\d+)", feat_str)
    groupd = int(groupd[0]) if groupd else 0
    remove_edges = re.findall(r"re(\w+)", feat_str)
    remove_edges = remove_edges[0] if remove_edges else 'none'
    centrality = feat_str.find("cent") >= 0
    coord = feat_str.find("coord") >= 0

    pre_transform = FeatureExpander(degree=degree,
                                    onehot_maxdeg=onehot_maxdeg,
                                    AK=k,
                                    centrality=centrality,
                                    remove_edges=remove_edges,
                                    group_degree=groupd).transform

    if 'MNIST' in name or 'CIFAR' in name:
        if name == 'MNIST_SUPERPIXEL':
            train_dataset = MNISTSuperpixels(path,
                                             True,
                                             pre_transform=pre_transform,
                                             transform=T.Cartesian())
            test_dataset = MNISTSuperpixels(path,
                                            False,
                                            pre_transform=pre_transform,
                                            transform=T.Cartesian())
        else:
            train_dataset = ImageDataset(path,
                                         name,
                                         True,
                                         pre_transform=pre_transform,
                                         coord=coord,
                                         processed_file_prefix="data_%s" %
                                         feat_str)
            test_dataset = ImageDataset(path,
                                        name,
                                        False,
                                        pre_transform=pre_transform,
                                        coord=coord,
                                        processed_file_prefix="data_%s" %
                                        feat_str)
        dataset = (train_dataset, test_dataset)
    elif 'QM9' in name:
        dataset = QM9Ext(path,
                         pre_transform=pre_transform,
                         processed_filename="data_%s.pt" % feat_str)
    elif 'ModelNet' in name:
        # Point clouds need the cloud-point variant of the expander.
        pre_transform = FeatureExpander(
            degree=degree,
            onehot_maxdeg=onehot_maxdeg,
            AK=k,
            centrality=centrality,
            remove_edges=remove_edges,
            group_degree=groupd).cloud_point_transform
        train_dataset = ModelNetExT(path,
                                    train=True,
                                    pre_transform=pre_transform,
                                    processed_file_prefix="data_%s" % feat_str)
        # BUG FIX: the test split previously loaded train=True, so train and
        # test datasets were identical.
        test_dataset = ModelNetExT(path,
                                   train=False,
                                   pre_transform=pre_transform,
                                   processed_file_prefix="data_%s" % feat_str)
        dataset = (train_dataset, test_dataset)
    elif 'TOSCA' in name:
        dataset = TOSCAEXT(path,
                           pre_transform=pre_transform,
                           processed_file_prefix="data_%s" % feat_str)
    else:
        dataset = TUDatasetExt(path,
                               name,
                               pre_transform=pre_transform,
                               use_node_attr=True,
                               processed_filename="data_%s.pt" % feat_str)

        # Edge attributes are not used downstream; drop them.
        dataset.data.edge_attr = None

    return dataset
Exemplo n.º 24
0
import os
from PIL import Image
import torch
import csv
import matplotlib.pyplot as plt

# Load a trained SegNet checkpoint and run it over the test images,
# displaying each input interactively.
# NOTE(review): hard-coded CUDA device — this fails on CPU-only hosts;
# confirm a GPU is always available where this script runs.
device = torch.device('cuda')
model = SegNet()
# NOTE(review): despite the name, this is a checkpoint *file* path, not a directory.
model_dir = "wandb/run-20191017_073956-zukd8wh5/model.pt"
model.load_state_dict(torch.load(model_dir))
model.eval()
model = model.to(device)

# Test-time preprocessing; False presumably selects "no augmentation" —
# confirm against utils.get_transforms.
transforms = utils.get_transforms(False)
shape = (1400, 2100, 3)  # original image dimensions (H, W, C) — assumed; TODO confirm
test_dataset = ImageDataset(utils.TEST_IMAGES, os.listdir(utils.TEST_IMAGES),
                            None, transforms, shape, True)
batch_size = 1
# NOTE(review): shuffle=True on a *test* loader is unusual; harmless here only
# because results are viewed interactively rather than written in order.
data_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          num_workers=4)

# Header row for run-length-encoded predictions (Kaggle submission format).
encodes = [["Image_Label", "EncodedPixels"]]
for i, data in enumerate(data_loader):
    image, path = data
    image = image.to(device)
    # Reshape to (N, 3, 350, 525) before the forward pass — assumes the
    # transforms resized images to 350x525; TODO confirm.
    out = model(image.view(-1, 3, 350, 525))
    out = out.cpu().detach().numpy()
    # Progress indicator: batch index over total test-image count.
    print(str(i) + "/" + str(len(os.listdir(utils.TEST_IMAGES))))
    plt.imshow(utils.conv_image(image[0]))
    plt.show()
    #########################################################################################
    #                                  Loading Data                                         #
    #########################################################################################

    # Create training set: pair each noisy FDK reconstruction slice (input)
    # with the corresponding 50-iteration iterative reconstruction (target).
    inp_imgs = []
    tgt_imgs = []

    # natsorted keeps slice files in numeric order so inputs and targets stay
    # aligned index-for-index. NOTE(review): this relies on both globs
    # yielding the same number of files per scan — confirm for the dataset.
    for i in sorted(training_scans):
        inp_imgs.extend(natsorted(glob.glob(os.path.join(dataset_dir, 
            'Walnut{}/fdk_pos{}_*.tif*'.format(i, pos)))))
        tgt_imgs.extend(natsorted(glob.glob(os.path.join(dataset_dir, 
            'Walnut{}/iterative_iter50_*.tif*'.format(i)))))

    train_ds = ImageDataset(inp_imgs, tgt_imgs)
    print('Training set size', str(len(train_ds)))

    # Create validation set: same pairing scheme over the held-out scans.
    inp_imgs = []
    tgt_imgs = []

    for i in sorted(val_scans):
        inp_imgs.extend(natsorted(glob.glob(os.path.join(dataset_dir, 
            'Walnut{}/fdk_pos{}_*.tif*'.format(i, pos)))))
        tgt_imgs.extend(natsorted(glob.glob(os.path.join(dataset_dir, 
            'Walnut{}/iterative_iter50_*.tif*'.format(i)))))
          
    val_ds = ImageDataset(inp_imgs, tgt_imgs)
    print('Validation set size', str(len(val_ds)))
Exemplo n.º 26
0
# Manual smoke test: construct several face datasets (torchvision CelebA plus
# three local image folders) with minimal transforms.
if __name__ == "__main__":
    # NOTE(review): MNIST is imported but never used below.
    from torchvision.datasets import CelebA, MNIST

    # CelebA: center-crop to the standard 178px face box, then downscale to
    # 128x128. download=False assumes the archive is already on disk.
    dataset = CelebA("/run/media/gerben/LinuxData/data/", download=False,
                     transform=transforms.Compose([
                         transforms.CenterCrop(178),
                         transforms.Resize(128),
                         transforms.ToTensor()
                     ])
                     )

    from image_dataset import ImageDataset

    # FRGC crops — presumably already face-aligned, so only ToTensor; confirm.
    dataset2 = ImageDataset("/run/media/gerben/LinuxData/data/frgc_cropped",
                            transform=transforms.Compose([
                                transforms.ToTensor()
                            ])
                            )

    # FFHQ aligned 64px thumbnails.
    dataset3 = ImageDataset("/run/media/gerben/LinuxData/data/ffhq_thumbnails/aligned64",
                            transform=transforms.Compose([
                                transforms.ToTensor()
                            ])
                            )

    # CelebA faces pre-cropped to 64x64.
    dataset4 = ImageDataset("/run/media/gerben/LinuxData/data/celeba/cropped_faces64",
                            transform=transforms.Compose([
                                transforms.ToTensor()
                            ])
                            )
Exemplo n.º 27
0
# Checkpoint output paths for ADDA-style domain adaptation, keyed by the
# (source, target) domain pair defined earlier in the notebook.
output_src_encoder_path = "./models/src_encoder_{}_{}.pth".format(d_source, d_target)
output_src_classifier_path = "./models/src_classifier_{}_{}.pth".format(d_source, d_target)
output_tgt_encoder_path = "./models/tgt_encoder_{}_{}.pth".format(d_source, d_target)
# NOTE(review): "discirminator" is misspelled, but it is a runtime file path —
# correcting it would orphan checkpoints already saved under the old name.
output_discriminator_path = "./models/discirminator_{}_{}.pth".format(d_source, d_target)


print("### [Info] Source: {} | Target: {} ###".format(d_source, d_target))
#############################


# In[3]:


# Load datasets: "train" splits for adaptation, "val" splits for monitoring.
# ImageDataset(split, domain) signature assumed from usage here — confirm.
src_dataset = ImageDataset("train", d_source)
tgt_dataset = ImageDataset("train", d_target)
val_src_dataset = ImageDataset("val", d_source)
val_tgt_dataset = ImageDataset("val", d_target)

# Training loaders are shuffled; validation loaders keep a fixed order.
src_dataloader = DataLoader(src_dataset, batch_size=batch_size, shuffle=True)
tgt_dataloader = DataLoader(tgt_dataset, batch_size=batch_size, shuffle=True)
val_src_dataloader = DataLoader(val_src_dataset, batch_size=batch_size, shuffle=False)
val_tgt_dataloader = DataLoader(val_tgt_dataset, batch_size=batch_size, shuffle=False)

# Domain labels consumed by the discriminator: 0 = source, 1 = target.
src_label = 0
tgt_label = 1

print(len(src_dataset), len(tgt_dataset), len(val_src_dataset), len(val_tgt_dataset))

Exemplo n.º 28
0
from train import train

# ---- Hyperparameters ----
batch_size = 2
lr = 1e-5
n_epochs = 10

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Device:', device)

# Paired de-raining data: one folder of rainy inputs, one of clean targets.
path = 'RAIN_DATASET_COMPRESSED/ALIGNED_PAIRS'
classes = ['REAL_DROPLETS', 'CLEAN']
# path='RAIN_DATASET_2_COMPRESSED/train'
# classes=['data','gt']

dataset = ImageDataset(path, classes[0], classes[1])
# 80/20 train/test split. Compute the train size ONCE instead of evaluating
# int(.8 * len(dataset)) twice inline; the remainder absorbs any rounding so
# the two halves always sum to len(dataset).
n_train = int(0.8 * len(dataset))
train_dataset, test_dataset = torch.utils.data.random_split(
    dataset, [n_train, len(dataset) - n_train])
train_data_loader = torch.utils.data.DataLoader(train_dataset,
                                                batch_size=batch_size,
                                                shuffle=True)
# NOTE(review): shuffle=True on the test loader is unusual but harmless for
# per-image evaluation; kept to preserve existing behavior.
test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)

net = UNet().to(device)
# net=torch.load('results3/net.pkl').to(device)
#loss_fn=nn.MSELoss()
# Smooth L1 (Huber) is less sensitive to outlier pixels than plain MSE.
loss_fn = nn.SmoothL1Loss()
optimizer = optim.Adam(net.parameters(), lr=lr)
Exemplo n.º 29
0
# Target domain for the adaptation experiment (source defined earlier).
d_target = "usps"

# Encoder checkpoints to load for evaluation/visualization.
output_src_encoder_path = "./models/src_encoder_{}_{}.pth".format(
    d_source, d_target)
output_tgt_encoder_path = "./models/tgt_encoder_{}_{}.pth".format(
    d_source, d_target)

# t-SNE scatter plots: one colored by class label, one by domain.
output_tsne_a_path = "./images/{}_{}_label.png".format(d_source, d_target)
output_tsne_b_path = "./images/{}_{}_domain.png".format(d_source, d_target)
#############################

# In[3]:

# prepare dataset — "test" splits only; no shuffling so embeddings stay in a
# deterministic order across runs.
batch_size = 128
source_dataset = ImageDataset("test", d_source)
target_dataset = ImageDataset("test", d_target)

source_dataloader = DataLoader(source_dataset,
                               batch_size=batch_size,
                               shuffle=False)
target_dataloader = DataLoader(target_dataset,
                               batch_size=batch_size,
                               shuffle=False)

# In[4]:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# prepare model
# init models
Exemplo n.º 30
0
############### Arguments ###############
batch_size = 4
num_epochs = 50
save_model_path = "./models/vgg-base-fcn8.pth"
#########################################

# In[7]:

# FCN-8s semantic-segmentation model, warm-started from an ImageNet-pretrained
# VGG16 backbone via the model's own copy routine.
model = FCN8s()
vgg16 = models.vgg16(pretrained=True)
model.copy_params_from_vgg16(vgg16)

# In[5]:

# Shuffled training loader; deterministic validation loader.
train_dataset = ImageDataset("train")
train_dataloader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True)

val_dataset = ImageDataset("val")
val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

# In[17]:

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# In[15]: