Example No. 1
def create_simple_data_set(
    n_training_points,
    n_testing_points,
    low=0,
    high=3,
    mode=training_testing_split.SEPERATE,
    kernel=kernel_matern,
    shuffle=True,
):
    """
    This function uses GP to generate data
    """
    gp = gaussian_process(kernel=kernel, verbose=True)

    mid = (low + high) / 2

    if mode == training_testing_split.SEPERATE_LONG:
        x_training, x_testing = __seperate_long(
            n_training_points, n_testing_points, low, high
        )
    elif mode == training_testing_split.SEPERATE:
        x_training, x_testing = __seperate(
            n_training_points, n_testing_points, low, high
        )
    elif mode == training_testing_split.INTERSPREAD:
        x_training, x_testing = __interspread(
            n_training_points, n_testing_points, low, high
        )
    elif mode == training_testing_split.RANDOM:
        x_training, x_testing = __random(n_training_points, n_testing_points, low, high)
    elif mode == training_testing_split.MIXED:

        def r(z):
            # split z points into 4 groups according to random proportions
            dist = np.random.randint(low=1, high=100, size=4)
            dist = dist / dist.sum()
            return (z * dist).round().astype(int)

        training_dist = r(n_training_points)
        testing_dist = r(n_testing_points)
        x1, x2 = __random(training_dist[0], testing_dist[0], low, high)
        x11, x22 = __interspread(training_dist[1], testing_dist[1], low, high)
        x111, x222 = __interspread(training_dist[2], testing_dist[2], low, high)
        x1111, x2222 = __seperate(training_dist[3], testing_dist[3], low, high)
        x_training = np.vstack([x1, x11, x111, x1111])
        x_testing = np.vstack([x2, x22, x222, x2222])

    y_samples = gp.sample(np.vstack([x_training, x_testing]), 1).squeeze()
    y_training = y_samples[: len(x_training)].reshape(-1, 1)
    y_testing = y_samples[len(x_training) :].reshape(-1, 1)
    training_data_set = data_loader.DataSet(X=x_training, Y=y_training)
    testing_data_set = data_loader.DataSet(X=x_testing, Y=y_testing)

    if shuffle:
        training_data_set.shuffle()
        testing_data_set.shuffle()

    return training_data_set, testing_data_set
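
A minimal usage sketch (not part of the original example; it assumes the training_testing_split enum, kernel_matern, and the module's DataSet are importable, and that DataSet exposes X and Y arrays):

train_set, test_set = create_simple_data_set(
    n_training_points=50,
    n_testing_points=20,
    low=0,
    high=3,
    mode=training_testing_split.INTERSPREAD,
    kernel=kernel_matern,
    shuffle=True,
)
print(train_set.X.shape, train_set.Y.shape)  # X and Y assumed attributes of DataSet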
Example No. 2
    def test_change_training_size(self):
        conf = self.defaults._replace(train_rat=1.0)
        ds = data_loader.DataSet(conf)
        assert ds.lenOfEpoch('train') == 10
        assert ds.lenOfEpoch('test') == 0

        conf = self.defaults._replace(train_rat=0.0)
        ds = data_loader.DataSet(conf)
        assert ds.lenOfEpoch('train') == 0
        assert ds.lenOfEpoch('test') == 10
Example No. 3
    def test_limit_dataset_size(self):
        # There are only 3-4 features per label, so asking for up to 10 per
        # label does not reduce the data set.
        conf = self.defaults._replace(train_rat=1.0, num_samples=10)
        ds = data_loader.DataSet(conf)
        assert ds.lenOfEpoch('train') == 10

        # Now there must be exactly 1 feature for each of the three labels,
        # which means a total of 3 features in the data set.
        conf = self.defaults._replace(train_rat=1.0, num_samples=1)
        ds = data_loader.DataSet(conf)
        assert ds.lenOfEpoch('train') == 3
Example No. 4
    def test_nextBatch_reset(self):
        ds = data_loader.DataSet(self.defaults._replace(train_rat=0.8))

        ds.nextBatch(2, 'train')
        assert ds.posInEpoch('train') == 2
        assert ds.posInEpoch('test') == 0

        ds.nextBatch(1, 'test')
        assert ds.posInEpoch('train') == 2
        assert ds.posInEpoch('test') == 1

        ds.reset('train')
        assert ds.posInEpoch('train') == 0
        assert ds.posInEpoch('test') == 1

        ds.reset('test')
        assert ds.posInEpoch('train') == 0
        assert ds.posInEpoch('test') == 0

        ds.nextBatch(2, 'train')
        ds.nextBatch(2, 'test')
        assert ds.posInEpoch('train') == 2
        assert ds.posInEpoch('test') == 2
        ds.reset()
        assert ds.posInEpoch('train') == 0
        assert ds.posInEpoch('test') == 0
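
For readers unfamiliar with the posInEpoch / nextBatch / reset contract exercised above, here is a self-contained toy sketch of the cursor bookkeeping these tests assume; it is not the real data_loader.DataSet.

import numpy as np

class EpochCursorSketch:
    """Toy illustration of per-split epoch cursors (not the real DataSet)."""
    def __init__(self, n_train, n_test):
        self._data = {'train': np.arange(n_train), 'test': np.arange(n_test)}
        self._pos = {'train': 0, 'test': 0}

    def posInEpoch(self, name):
        return self._pos[name]

    def nextBatch(self, n, name):
        start = self._pos[name]
        batch = self._data[name][start:start + n]
        self._pos[name] += len(batch)  # advance by what was actually returned
        return batch

    def reset(self, name=None):
        for key in ([name] if name is not None else list(self._pos)):
            self._pos[key] = 0

ds = EpochCursorSketch(8, 2)
ds.nextBatch(2, 'train')
assert ds.posInEpoch('train') == 2 and ds.posInEpoch('test') == 0
ds.reset()
assert ds.posInEpoch('train') == ds.posInEpoch('test') == 0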
Example No. 5
def test_item_to_mat_conversion():
    """ Test conversion mat > item > mat
    NOT testing item > mat > item, because comparing items more tedious (first need association!),
    """
    # get "original" mat
    params = {'model': {'factor_downsample': 4,
                        'input_height': 512,
                        'input_width': 1536,
                        },
              }
    dataset = data_loader.DataSet(path_csv='../data/train.csv',
                                  path_folder_images='../data/train_images',
                                  path_folder_masks='../data/train_masks',
                                  )
    list_ids = dataset.list_ids
    item_org = dataset.load_item(list_ids[0], flag_load_mask=False)
    dataset_torch = data_loader_torch.DataSetTorch(dataset, params)
    mat_org = dataset_torch.convert_item_to_mat(item_org)

    # convert back
    item = dataset_torch.convert_mat_to_item(mat_org)
    item.img = item_org.img  # need this for back conversion!
    mat = dataset_torch.convert_item_to_mat(item)

    assert np.allclose(mat, mat_org)
Example No. 6
    def test_nextBatch(self):
        ds = data_loader.DataSet(self.defaults._replace(train_rat=0.8))

        # Basic parameters.
        assert ds.lenOfEpoch('train') == 8
        assert ds.lenOfEpoch('test') == 2
        assert ds.posInEpoch('train') == 0
        assert ds.posInEpoch('test') == 0

        # Fetch one feature/label/handle.
        x, y, handles = ds.nextBatch(1, 'train')
        assert len(x) == len(y) == len(handles) == 1
        assert ds.posInEpoch('train') == 1
        assert ds.posInEpoch('test') == 0
        assert isinstance(x, np.ndarray) and isinstance(y, np.ndarray)
        assert isinstance(handles, np.ndarray)
        assert x.shape == (1, 1, 2, 2) and x.dtype == np.float32
        assert y.shape == (1, ) and y.dtype == np.int32
        assert handles.shape == (1,) and handles.dtype == np.int64

        # Fetch the remaining 7 training data elements.
        x, y, _ = ds.nextBatch(7, 'train')
        assert len(x) == len(y) == len(_) == 7
        assert ds.posInEpoch('train') == 8
        assert ds.posInEpoch('test') == 0

        # Another query must yield nothing because the epoch is exhausted.
        x, y, _ = ds.nextBatch(1, 'train')
        assert np.array_equal(x, np.zeros((0, 1, 2, 2)))
        assert np.array_equal(y, [])
        assert len(_) == 0
Example No. 7
def evaluate(
    model,
    device,
    params,
):
    # define dataset
    dataset = data_loader.DataSet(
        path_csv=params['datasets']['valid']['path_csv'],
        path_folder_images=params['datasets']['valid']['path_folder_images'],
        path_folder_masks=params['datasets']['valid']['path_folder_masks'],
    )
    dataset_torch = data_loader_torch.DataSetTorch(dataset,
                                                   params,
                                                   flag_augment=False)
    dataset_loader = torch.utils.data.DataLoader(
        dataset=dataset_torch,
        batch_size=params['train']['batch_size_eval'],
        shuffle=False,
        num_workers=0,
    )

    # set model to eval (affects e.g. dropout layers) and disable unnecessary grad computation
    model.eval()
    torch.set_grad_enabled(False)
    torch.cuda.empty_cache()  # empty cuda cache to prevent memory errors
    gc.collect()  # collect unreferenced objects to free memory before evaluation

    # calculate loss for whole dataset
    num_batches = len(dataset_loader)
    loss_per_name = dict()
    print("Evaluating")
    for img_batch, mask_batch, heatmap_batch, regr_batch in tqdm(
            dataset_loader):
        # concat img and mask and perform inference
        input = torch.cat([img_batch, mask_batch],
                          1)  # nbatch, nchannels, height, width
        output = model(input.to(device))

        # calculate loss
        batch_loss_per_name = calc_loss(
            output,
            heatmap_batch.to(device),
            regr_batch.to(device),
            params['train']['loss'],
        )
        for name, batch_loss in batch_loss_per_name.items():
            if name not in loss_per_name:
                loss_per_name[name] = 0
            loss_per_name[name] += batch_loss.data

    # calculate average
    for name, loss in loss_per_name.items():
        loss_per_name[name] = loss.cpu().numpy() / len(dataset_loader)
    len_dataset = len(dataset_loader.dataset)  # number of samples in the dataset
    len_dataset2 = len(dataset_loader)  # number of batches per epoch
    return loss_per_name
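
An illustrative call, assuming `model` is an already-constructed network; the nested keys mirror the ones evaluate() reads above, while the concrete paths and values are placeholder assumptions:

import torch

params = {
    'model': {'factor_downsample': 4, 'input_height': 512, 'input_width': 1536},
    'datasets': {'flag_use_mask': 1,
                 'valid': {'path_csv': '../data/valid.csv',
                           'path_folder_images': '../data/valid_images',
                           'path_folder_masks': '../data/valid_masks'}},
    'train': {'batch_size_eval': 4, 'loss': {'flag_focal_loss': 1}},
}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
loss_per_name = evaluate(model, device, params)
print(loss_per_name)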
Example No. 8
    def test_nextBatch_invalid(self):
        ds = data_loader.DataSet(self.defaults._replace(train_rat=0.8))

        # Invalid value for N.
        with pytest.raises(AssertionError):
            ds.nextBatch(-1, 'train')

        # Unknown dataset name (must be 'train' or 'test').
        with pytest.raises(AssertionError):
            ds.nextBatch(1, 'foo')
Example No. 9
    def test_basic(self):
        conf = self.defaults._replace(train_rat=0.8)
        ds = data_loader.DataSet(conf)
        assert ds.lenOfEpoch('train') == 8
        assert ds.lenOfEpoch('test') == 2
        assert ds.posInEpoch('train') == 0
        assert ds.posInEpoch('test') == 0
        dim = ds.imageDimensions()
        assert isinstance(dim, np.ndarray)
        assert dim.dtype == np.uint32
        assert dim.tolist() == [1, 2, 2]
        assert ds.classNames() == {0: '0', 1: '1', 2: '2'}
Example No. 10
def test_car_to_string():
    """ Test that conversion string -> car -> string works
    """
    dataset = data_loader.DataSet(
        path_csv='../data/train.csv',
        path_folder_images='../data/train_images',
        path_folder_masks='../data/train_masks',
    )
    cars_as_string = dataset.df_cars.loc[0, 'PredictionString']
    item = data_loader.DataItem()
    item.set_cars_from_string(cars_as_string)
    cars_as_string2 = item.get_cars_as_string()
    # cars_as_string2 += '4'
    assert cars_as_string == cars_as_string2
Example No. 11
    def test_limitSampleSize(self):
        conf = self.defaults._replace(train_rat=1.0, num_samples=10)
        ds = data_loader.DataSet(conf)

        for i in range(1, 5):
            x = np.arange(100)
            y = x % 3
            m = x % 5
            x, y, m = ds.limitSampleSize(x, y, m, i)
            assert len(x) == len(y) == len(m) == 3 * i
            assert set(y) == {0, 1, 2}

        x = np.arange(6)
        y = x % 3
        m = x % 5
        x2, y2, m2 = ds.limitSampleSize(x, y, m, 0)
        assert len(x2) == len(y2) == len(m2) == 0

        x2, y2, m2 = ds.limitSampleSize(x, y, m, 100)
        assert len(x2) == len(y2) == len(m2) == len(x)
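
A toy re-implementation of the per-label limiting behaviour this test exercises (an illustrative assumption only, not the real DataSet.limitSampleSize):

import numpy as np

def limit_sample_size_sketch(x, y, m, n):
    # Keep at most n samples per distinct label in y.
    keep = []
    for label in np.unique(y):
        keep.extend(np.nonzero(y == label)[0][:n].tolist())
    keep = np.sort(np.asarray(keep, dtype=int))
    return x[keep], y[keep], m[keep]

x = np.arange(100)
y = x % 3
m = x % 5
x2, y2, m2 = limit_sample_size_sketch(x, y, m, 2)
assert len(x2) == len(y2) == len(m2) == 6 and set(y2) == {0, 1, 2}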
Example No. 12
def main():
    log_path = "./tb_logs"
    if os.path.isdir(log_path):
        shutil.rmtree(log_path)

    if FLAGS.dataset == "mnist":
        data = dl.DataSet(28, 28, "mnist",
                          "../datasets/mnist/train_images.mat",
                          "../datasets/mnist/test_images.mat",
                          "../datasets/mnist/train_labels.mat",
                          "../datasets/mnist/test_labels.mat")
        data.prepare_shape(FLAGS.num_categories)

    elif FLAGS.dataset == "svhn":
        data = dl.DataSet(32, 32, "svhn",
                          "../datasets/svhn_dataset/train_32x32.mat",
                          "../datasets/svhn_dataset/test_32x32.mat")
        data.prepare_shape(FLAGS.num_categories)

    data_height = data._height
    data_width = data._width

    num_dimensions = 1

    x = tf.placeholder(tf.float32, [None, data_height * data_width])
    y_ = tf.placeholder(tf.float32, [None, FLAGS.num_categories])

    output, keep_prob = nns.nn_4layers_ccff(x, num_dimensions, data_height,
                                            data_width, FLAGS.num_categories)

    sess = tf.InteractiveSession()

    with tf.name_scope("cross_entropy"):
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=output))

    with tf.name_scope("loss_optmizer"):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    with tf.name_scope("accuracy"):
        correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.summary.scalar("cross_entropy", cross_entropy)
    tf.summary.scalar("accuracy", accuracy)

    summarize_all = tf.summary.merge_all()

    sess.run(tf.global_variables_initializer())

    tbWriter = tf.summary.FileWriter(log_path, sess.graph)

    start_time = time.time()
    end_time = time.time()

    for i in range(FLAGS.num_steps):
        batch_data, batch_labels = data.load_batch(FLAGS.batch_size)
        _, summary = sess.run([train_step, summarize_all],
                              feed_dict={
                                  x: batch_data,
                                  y_: batch_labels,
                                  keep_prob: 0.5
                              })

        if i % FLAGS.display_step == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: batch_data,
                y_: batch_labels,
                keep_prob: 1.0
            })
            end_time = time.time()
            print(
                "step {0}, elapsed time {1:.2f} seconds, training accuracy {2:.3f}%"
                .format(i, end_time - start_time, train_accuracy * 100.0))
            tbWriter.add_summary(summary, i)

    end_time = time.time()
    print("Total training time for {0} batches: {1:.2f} seconds".format(
        i + 1, end_time - start_time))

    test_data, test_labels = data.load_test_data()
    print("Test accuracy {0:.3f}%".format(
        accuracy.eval(feed_dict={
            x: test_data,
            y_: test_labels,
            keep_prob: 1.0
        }) * 100.0))

    saver = tf.train.Saver()
    saver.save(sess, '../trained_models/' + FLAGS.model_path)
    saver.export_meta_graph('../trained_models/' + FLAGS.model_path + '.meta')

    sess.close()
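
The script above reads several FLAGS that are defined elsewhere; a minimal TensorFlow 1.x-style sketch of those definitions (default values here are placeholder assumptions):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string("dataset", "mnist", "Either 'mnist' or 'svhn'.")
flags.DEFINE_integer("num_categories", 10, "Number of output classes.")
flags.DEFINE_integer("num_steps", 2000, "Number of training steps.")
flags.DEFINE_integer("batch_size", 64, "Mini-batch size.")
flags.DEFINE_integer("display_step", 100, "How often to log training accuracy.")
flags.DEFINE_string("model_path", "model.ckpt", "Filename for the saved model.")
FLAGS = flags.FLAGS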
Example No. 13
                    t=T_stride,
                    train_overlap=T_overlop,
                    noise_type=NOISE_TYPE[3],
                    train_divide=TRAIN_DIVIDE,
                    noise_proportion=noise_proportion,
                    noise_path=NOISE_PATH,
                    seed=seed)
                train_X_features = feature_extractor.get_features(
                    FEATURES_TO_USE, train_X)
                valid_features_dict = {}
                for i in val_dict:
                    X1 = feature_extractor.get_features(
                        FEATURES_TO_USE, val_dict[i]['X'])
                    valid_features_dict[i] = {'X': X1, 'y': val_dict[i]['y']}

                train_data = data_loader.DataSet(train_X_features, train_y)
                train_loader = DataLoader(train_data,
                                          batch_size=BATCH_SIZE,
                                          shuffle=True)

                model = MODEL.MACNN(attention_head, attention_hidden)

                if torch.cuda.is_available():
                    model = model.cuda()

                criterion = nn.CrossEntropyLoss()
                optimizer = optim.Adam(model.parameters(),
                                       lr=learning_rate,
                                       weight_decay=1e-6)

                maxWA = 0
Example No. 14
def train(
    model,
    device,
    params,
):
    # define training dataset
    dataset = data_loader.DataSet(
        path_csv=params['datasets']['train']['path_csv'],
        path_folder_images=params['datasets']['train']['path_folder_images'],
        path_folder_masks=params['datasets']['train']['path_folder_masks'],
    )
    dataset_torch = data_loader_torch.DataSetTorch(
        dataset,
        params,
        flag_load_label=True,
        flag_augment=params['train']['use_augmentation'],
    )
    dataset_loader = torch.utils.data.DataLoader(
        dataset=dataset_torch,
        batch_size=params['train']['batch_size'],
        shuffle=True,
        num_workers=4,
        pin_memory=True,  # see https://pytorch.org/docs/stable/data.html
    )

    # define optimizer and a scheduler that decreases the learning rate by a constant factor every few epochs (both configured via params)
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=params['train']['learning_rate']['initial'],
    )
    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer,
        step_size=params['train']['learning_rate']['num_epochs_const'],
        gamma=params['train']['learning_rate']['factor_decrease'],
    )

    # for each epoch...
    df_out = pd.DataFrame()
    for idx_epoch in range(params['train']['num_epochs']):
        print("Training epoch {}".format(idx_epoch))

        # set model to train (affects e.g. dropout layers) and disable unnecessary grad computation
        model.train()
        torch.set_grad_enabled(True)
        torch.cuda.empty_cache()  # empty cuda cache to prevent memory errors
        gc.collect()  # collect unreferenced objects to free memory before the epoch

        # calculate loss for whole dataset
        num_batches = len(dataset_loader)
        loss_per_name = dict()
        dataset_tqdm = tqdm(dataset_loader)
        for img_batch, mask_batch, heatmap_batch, regr_batch in dataset_tqdm:

            # concat img and mask and perform inference
            input = torch.cat([img_batch, mask_batch],
                              1)  # nbatch, nchannels, height, width
            output = model(input.to(device))

            # calculate loss
            batch_loss_per_name = calc_loss(
                output,
                heatmap_batch.to(device),
                regr_batch.to(device),
                params['train']['loss'],
            )
            for name, batch_loss in batch_loss_per_name.items():
                if name not in loss_per_name:
                    loss_per_name[name] = 0
                loss_per_name[name] += batch_loss.data

            # change tqdm progress bar description
            description = "loss: "
            for name, batch_loss in batch_loss_per_name.items():
                description += "{}={:.3f} ".format(
                    name,
                    batch_loss.data.cpu().numpy())
            dataset_tqdm.set_description(description)

            # perform optimization
            batch_loss_per_name['tot'].backward(
            )  # computes x.grad += dloss/dx for all parameters x
            optimizer.step()  # updates values x += -lr * x.grad
            optimizer.zero_grad()  # set x.grad = 0, for next iteration

        # step learning rate after each epoch (not after each batch)
        lr_scheduler.step()

        # calculate average and store results
        for name in loss_per_name.keys():
            loss_per_name[name] = loss_per_name[name].cpu().numpy() / len(
                dataset_loader)
            df_out.loc[idx_epoch, 'loss_' + name] = loss_per_name[name]
        values_per_name = evaluate(model, device, params)
        for key, value in values_per_name.items():
            df_out.loc[idx_epoch, 'valid_' + key] = value

        # save history
        path_csv = os.path.join(
            params['path_folder_out'],
            'train_history_{}.csv'.format(idx_epoch),
        )
        os.makedirs(os.path.dirname(path_csv), exist_ok=True)
        df_out.to_csv(path_csv, sep=';')
        print(df_out)

        # save model weights
        path_weights = os.path.join(
            params['path_folder_out'],
            'model_{}.pth'.format(idx_epoch),
        )
        torch.save(model.state_dict(), path_weights)
    return df_out
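
An illustrative call, assuming `model` is an already-constructed network; the nested keys mirror the ones train() and evaluate() read above, and all concrete paths and hyperparameter values are placeholder assumptions:

import torch

params = {
    'path_folder_out': '../output',
    'model': {'factor_downsample': 4, 'input_height': 512, 'input_width': 1536},
    'datasets': {
        'flag_use_mask': 1,
        'train': {'path_csv': '../data/train.csv',
                  'path_folder_images': '../data/train_images',
                  'path_folder_masks': '../data/train_masks'},
        'valid': {'path_csv': '../data/valid.csv',
                  'path_folder_images': '../data/valid_images',
                  'path_folder_masks': '../data/valid_masks'},
    },
    'train': {'batch_size': 2, 'batch_size_eval': 2, 'num_epochs': 10,
              'use_augmentation': True,
              'learning_rate': {'initial': 1e-3, 'num_epochs_const': 3,
                                'factor_decrease': 0.1},
              'loss': {'flag_focal_loss': 1}},
}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
history = train(model.to(device), device, params)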
Example No. 15
    def test_basic_err(self):
        with pytest.raises(AssertionError):
            data_loader.DataSet(self.defaults._replace(train_rat=-1.0))
        with pytest.raises(AssertionError):
            data_loader.DataSet(self.defaults._replace(train_rat=1.1))
Example No. 16
def predict(model,
            device,
            params,
            ):
    # Create data generators - they will produce batches
    dataset_test = data_loader.DataSet(
        path_csv=params['datasets']['test']['path_csv'],
        path_folder_images=params['datasets']['test']['path_folder_images'],
        path_folder_masks=params['datasets']['test']['path_folder_masks'],
    )
    dataset_torch_test = data_loader_torch.DataSetTorch(
        dataset_test, params,
        flag_load_label=False,
        flag_augment=False,
    )
    data_loader_test = torch.utils.data.DataLoader(
        dataset=dataset_torch_test,
        batch_size=params['predict']['batch_size'],
        shuffle=False,
        num_workers=4,
        pin_memory=True,  # see https://pytorch.org/docs/stable/data.html
    )

    # perform predictions
    predictions = []
    model.eval()
    idx_batch = -1
    for img, mask, _, _ in tqdm(data_loader_test):
        idx_batch += 1
        if idx_batch > params['predict']['num_batches_max']:
            print("Ending early because of param num_batches_max={}".format(params['predict']['num_batches_max']))
            break

        # perform prediction
        with torch.no_grad():
            # concat img and mask and perform inference
            input = torch.cat([img, mask], 1)  # nbatch, nchannels, height, width
            output = model(input.to(device))
        output = output.data.cpu().numpy()

        # extract cars as string from each element in batch
        num_elems_in_batch = output.shape[0]
        for idx_elem_in_batch in range(num_elems_in_batch):
            idx_id = idx_batch * params['predict']['batch_size'] + idx_elem_in_batch
            id = dataset_test.list_ids[idx_id]

            # get mat from output and plot
            mat = output[idx_elem_in_batch, ...]
            mat = np.rollaxis(mat, 0, 3)  # move channel axis to last position (channels-first -> channels-last)
            if params['predict']['flag_plot_mat']:
                # convert image to numpy
                img_numpy = img.data.cpu().numpy()
                img_numpy = img_numpy[idx_elem_in_batch, ...]
                img_numpy = np.rollaxis(img_numpy, 0, 3)  # move channel axis to last position
                img_numpy = img_numpy[:, :, ::-1]  # BGR to RGB

                fig, ax = plt.subplots(2, 1, figsize=(10, 10))
                ax[0].imshow(img_numpy)
                ax_mask = ax[1].imshow(mat[:, :, 0], cmap='PiYG', vmin=-1, vmax=+1)
                fig.colorbar(ax_mask, ax=ax[1])
                if False:  # only use in case of multiple axes, here x,y,z
                    ax[2].imshow(mat[:, :, 4])
                    ax[3].imshow(mat[:, :, 5])
                    ax[4].imshow(mat[:, :, 7])
                for axi, label in zip(ax, ['img', 'mask']):  # , 'x', 'y', 'z']):
                    axi.set_ylabel(label)
                fig.suptitle('ImageID={}'.format(id))

                # save
                path_out = os.path.join(params['path_folder_out'],
                                        'pred_mat',
                                        '{:05d}.png'.format(idx_id),
                                        )
                os.makedirs(os.path.dirname(path_out), exist_ok=True)
                fig.savefig(path_out)
                plt.close()

            # convert mat to item and plot.
            item = dataset_torch_test.convert_mat_to_item(mat)
            if params['predict']['flag_optimize']:
                for idx_car, car in enumerate(item.cars):
                    x_new, y_new, z_new, is_marked = optimize_xyz(car.v, car.u, car.x, car.y, car.z, params)
                    item.cars[idx_car].x = x_new
                    item.cars[idx_car].y = y_new
                    item.cars[idx_car].z = z_new
                    item.cars[idx_car].is_marked = is_marked

            if params['predict']['flag_plot_item']:
                item_org = dataset_test.load_item(id)
                item.img = item_org.img
                item.mask = np.zeros((1, 1))
                fig, ax = item.plot()
                fig.suptitle('ImageID={}'.format(id))

                if idx_batch == 2:
                    num_cars = len(item.cars)
                    plt.show()

                # save
                path_out = os.path.join(params['path_folder_out'],
                                        'pred_item',
                                        '{:05d}.png'.format(idx_id),
                                        )
                os.makedirs(os.path.dirname(path_out), exist_ok=True)
                fig.savefig(path_out)
                plt.close()

            # extract prediction string from item
            string = item.get_cars_as_string(flag_submission=True)
            predictions.append(string)

    # predictions to csv
    df_out = pd.DataFrame()
    df_out['ImageId'] = dataset_test.list_ids
    df_out.loc[0:len(predictions) - 1, 'PredictionString'] = predictions
    print(df_out.head())
    path_csv = os.path.join(params['path_folder_out'], 'predictions.csv')
    df_out.to_csv(path_csv, sep=',', index=False)

    return df_out
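
An illustrative call of predict(); the 'predict' and 'datasets' keys mirror the ones read above, and the concrete paths and values are placeholder assumptions:

import torch

params = {
    'path_folder_out': '../output',
    'model': {'factor_downsample': 4, 'input_height': 512, 'input_width': 1536},
    'datasets': {'flag_use_mask': 1,
                 'test': {'path_csv': '../data/test.csv',
                          'path_folder_images': '../data/test_images',
                          'path_folder_masks': '../data/test_masks'}},
    'predict': {'batch_size': 2, 'num_batches_max': 10,
                'flag_plot_mat': False, 'flag_plot_item': False,
                'flag_optimize': False},
}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
df_predictions = predict(model, device, params)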


if __name__ == '__main__':
    params = {'model': {'factor_downsample': 4,
                        'input_height': 512,
                        'input_width': 1536,
                        },
              'train': {'loss': {'flag_focal_loss': 1}},
              'datasets': {'flag_use_mask': 1},
              }
    dataset = data_loader.DataSet(path_csv='../data/train.csv',
                                  path_folder_images='../data/train_images',
                                  path_folder_masks='../data/train_masks',
                                  )
    dataset_torch = DataSetTorch(dataset, params, flag_augment=True)
    num_items = len(dataset_torch)
    for idx_item in tqdm(range(num_items)):
        [img, mask, heatmap, regr] = dataset_torch[idx_item]

        # move the channel axis back to the last position (channels-first -> channels-last)
        img = np.rollaxis(img, 0, 3)
        mask = np.rollaxis(mask, 0, 3)
        regr = np.rollaxis(regr, 0, 3)
        print(img.shape)
        print(regr.shape)

        # plot example
        if False: