def __init__(self, transform=None, train=True, val=False, use_gt_bb=False):

    # Decision: always use random parameters (unless validating) and saved tensors

    super().__init__("irrelevant", train=train, val=val)

    assert train  # should not be used for testing

    use_random = not val

    self.pennaction = PennActionDataset("/data/mjakobs/data/pennaction/",
                                        use_random_parameters=use_random,
                                        train=train,
                                        val=val,
                                        use_gt_bb=True,
                                        use_saved_tensors=True,
                                        augmentation_amount=6)
    self.mpii = MPIIDataset("/data/mjakobs/data/mpii/",
                            use_random_parameters=use_random,
                            use_saved_tensors=True,
                            train=train,
                            val=val,
                            augmentation_amount=3)

    self.padding_amount = 8

    self.sample_amount = 11  # leads to an approximate 2/3 MPII, 1/3 PennAction mix
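The combined dataset's __len__ and __getitem__ are not part of this excerpt. A minimal, hypothetical sketch of how a flat index could be routed between the two underlying datasets (this is not the author's implementation):

# hypothetical sketch only -- the real routing logic is not shown in this excerpt
def __len__(self):
    return len(self.mpii) + len(self.pennaction)

def __getitem__(self, idx):
    if idx < len(self.mpii):
        return self.mpii[idx]
    return self.pennaction[idx - len(self.mpii)]

If MPII contributes roughly twice as many items as the sampled PennAction clips, such a concatenation would produce the approximate 2/3 to 1/3 mix mentioned in the comment above.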
Example #2
def plot_example_mpii():
    ds = MPIIDataset("/data/mjakobs/data/mpii/",
                     train=True,
                     val=False,
                     use_random_parameters=False,
                     use_saved_tensors=False)
    plot_index = 1
    for i in range(102, 502, 100):
        entry = ds[i]
        image_number = "{}".format(int(entry["image_path"][0].item()))
        image_name = "{}.jpg".format(image_number.zfill(9))
        image = io.imread(
            "/data/mjakobs/data/mpii/images/{}".format(image_name))
        plt.subplot(2, 2, plot_index)
        plot_index += 1
        plt.imshow(image)
        plt.xticks([])
        plt.yticks([])

    plt.show()
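These snippets are excerpts from a larger module, so their import section is not shown. They appear to rely on imports along these lines (a reconstruction, not the original file header; project-local names such as MPIIDataset, Mpii_8, joint_mapping, or eval_pckh_batch come from the repository itself):

# reconstructed imports the excerpts appear to assume; not the original file header
import csv
import random
from datetime import datetime
from os import makedirs
from os.path import exists
from shutil import rmtree

import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from skimage import io
from skimage.transform import resize
from torch.utils import data
from torch.utils.data import SubsetRandomSampler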
Example #3
def extract_mpii_pose(model_path, output_path):
    ds = MPIIDataset("/data/mjakobs/data/mpii/", train=True, val=True, use_random_parameters=False)
    model = Mpii_8(num_context=2)
    model.load_state_dict(torch.load(model_path, map_location="cpu"))
    model.eval()

    for i in range(len(ds)):
        entry = ds[i]
        frames = entry["normalized_image"].unsqueeze(0)
        matrices = entry["trans_matrix"]
        image_number = int(entry["image_path"].item())
        image_number = "{}".format(image_number).zfill(9)
        image_path = "/data/mjakobs/data/mpii/images/{}.jpg".format(image_number)
        _, predicted_poses, _, _ = model(frames)

        path = '{}/{}.png'.format(output_path, i)
        image = io.imread(image_path)
        plt.imshow(image)

        pose = predicted_poses.squeeze(0).squeeze(0)

        plot(pose.detach(), matrices, path)
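A hypothetical invocation; both paths are placeholders, not taken from the original repository:

# placeholder paths for illustration only
extract_mpii_pose("experiments/mpii_8/weights/final.pt", "/tmp/mpii_pose_plots")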
Example #4
def create_fragments_mpii(train=False,
                          val=False,
                          use_random=False,
                          subprefix="1",
                          split=1):
    ds = MPIIDataset("/data/mjakobs/data/mpii/",
                     use_random_parameters=use_random,
                     use_saved_tensors=False,
                     train=train,
                     val=val)

    print("-" * 50)
    print("Train: {}, Val: {}, Split: {}, Random: {}".format(
        train, val, split, use_random))
    print("-" * 50)

    length = len(ds)
    current = 0

    assert train

    root_dir = "/data/mjakobs/data/mpii/"

    if use_random:
        prefix = "rand{}_".format(subprefix)
    else:
        prefix = ""

    all_indices = list(range(len(ds)))

    if val:
        train_test_folder = "val/"
    else:
        train_test_folder = "train/"

    for counter, idx in enumerate(all_indices):
        entry = ds[idx]
        frame = entry["normalized_image"]
        pose = entry["normalized_pose"]
        matrix = entry["trans_matrix"]
        bbox = entry["bbox"]
        headsize = entry["head_size"]
        parameters = entry["parameters"]
        image_path = entry["image_path"]

        frame = ((frame + 1) / 2.0) * 255.0
        frame = frame.byte()

        pose[:, 0:2] = pose[:, 0:2] * 255.0
        pose = pose.int()

        original_image = ds.indices[idx]
        padded_original_image = str(original_image).zfill(8)

        torch.save(
            frame, root_dir + train_test_folder + prefix + "images/" +
            padded_original_image + ".frame.pt")

        # all remaining tensors share the same annotation folder and naming scheme
        annotations = {
            "headsize": headsize,
            "pose": pose,
            "matrix": matrix,
            "bbox": bbox,
            "parameters": parameters,
            "image_path": image_path,
        }
        for name, tensor in annotations.items():
            torch.save(
                tensor, root_dir + train_test_folder + prefix +
                "annotations/" + padded_original_image + ".{}.pt".format(name))
Example #5
def quantitative_evaluation():
    variances = [1, 2, 5, 10, 20, 50]
    variance_accuracies_1 = np.zeros(len(variances))
    variance_accuracies_2 = np.zeros(len(variances))
    variance_accuracies_3 = np.zeros(len(variances))
    variance_accuracies_4 = np.zeros(len(variances))
    valid_joints = np.zeros(len(variances))

    ds = MPIIDataset("/data/mjakobs/data/mpii/",
                     use_saved_tensors=True,
                     train=True,
                     val=False,
                     use_random_parameters=False)

    nr_objects = 1000

    original_model = build_softargmax_2d((255, 255, 1))

    indices = list(range(len(ds)))
    random.shuffle(indices)
    indices = indices[:nr_objects]

    for i in indices:
        example = ds[i]
        pose = example["normalized_pose"]
        for idx, cov in enumerate(variances):
            for joint in pose:

                x = np.rint([joint[0].item() * 255])[0]
                y = np.rint([joint[1].item() * 255])[0]

                if joint[2] == 0:
                    continue

                heatmap = create_2d_normal_image((x, y), cov, 255, 255)

                output = original_model.predict(heatmap.reshape(
                    1, 255, 255, 1))

                [scale_x, scale_y] = output[0][0]
                pred_x, pred_y = np.rint([scale_x * 255.0
                                          ])[0], np.rint([scale_y * 255.0])[0]

                difference_x = abs(pred_x - x)
                difference_y = abs(pred_y - y)

                variance_accuracies_1[idx] = variance_accuracies_1[idx] + (
                    difference_x <= 1 and difference_y <= 1)
                variance_accuracies_2[idx] = variance_accuracies_2[idx] + (
                    difference_x <= 2 and difference_y <= 2)
                variance_accuracies_3[idx] = variance_accuracies_3[idx] + (
                    difference_x <= 3 and difference_y <= 3)
                variance_accuracies_4[idx] = variance_accuracies_4[idx] + (
                    difference_x <= 4 and difference_y <= 4)

                valid_joints[idx] = valid_joints[idx] + 1

    print("1", variance_accuracies_1 / valid_joints.astype(np.float32))
    print("2", variance_accuracies_2 / valid_joints.astype(np.float32))
    print("3", variance_accuracies_3 / valid_joints.astype(np.float32))
    print("4", variance_accuracies_4 / valid_joints.astype(np.float32))
Example #6
def run_experiment_mpii(conf):
    learning_rate = conf["learning_rate"]
    nr_epochs = conf["nr_epochs"]
    validation_amount = conf["validation_amount"]
    limit_data_percent = conf["limit_data_percent"]
    numpy_seed = conf["numpy_seed"]
    num_blocks = conf["num_blocks"]
    name = conf["name"]
    batch_size = conf["batch_size"]
    val_batch_size = conf["val_batch_size"]
    use_saved_tensors = conf["use_saved_tensors"]
    nr_context = conf["nr_context"]
    project_dir = conf["project_dir"]

    ds = MPIIDataset("/data/mjakobs/data/mpii/",
                     use_random_parameters=False,
                     use_saved_tensors=use_saved_tensors)

    if num_blocks == 1:
        model = Mpii_1(num_context=nr_context).to(device)
    elif num_blocks == 2:
        model = Mpii_2(num_context=nr_context).to(device)
    elif num_blocks == 4:
        model = Mpii_4(num_context=nr_context).to(device)
    elif num_blocks == 8:
        model = Mpii_8(num_context=nr_context).to(device)
    else:
        raise ValueError("num_blocks must be 1, 2, 4 or 8")

    number_of_datapoints = int(len(ds) * limit_data_percent)
    indices = list(range(number_of_datapoints))
    split = int((1 - validation_amount) * number_of_datapoints)

    np.random.seed(numpy_seed)
    np.random.shuffle(indices)

    train_indices = indices[:split]
    val_indices = indices[split:]

    print("Using {} training and {} validation datapoints".format(
        len(train_indices), len(val_indices)))

    train_sampler = SubsetRandomSampler(train_indices)
    val_sampler = SubsetRandomSampler(val_indices)

    train_loader = data.DataLoader(ds,
                                   batch_size=batch_size,
                                   sampler=train_sampler)

    val_loader = data.DataLoader(ds,
                                 batch_size=val_batch_size,
                                 sampler=val_sampler)

    optimizer = optim.RMSprop(model.parameters(), lr=learning_rate)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'max',
                                                     verbose=True,
                                                     patience=1,
                                                     eps=0)

    if name is not None:
        experiment_name = name
        if project_dir != "":
            experiment_name = project_dir + "/" + experiment_name
    else:
        experiment_name = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

    create_if_not_exists("experiments")
    create_if_not_exists("experiments/{}".format(experiment_name))
    create_if_not_exists("experiments/{}/weights".format(experiment_name))

    remove_if_exists("experiments/{}/validation.csv".format(experiment_name))

    with open('experiments/{}/parameters.csv'.format(experiment_name),
              'w+') as parameter_file:
        parameter_file.write("paramter_name,value\n")
        parameter_file.write("learning_rate,{}\n".format(learning_rate))
        parameter_file.write("batch_size,{}\n".format(batch_size))
        parameter_file.write(
            "number_of_datapoints,{}\n".format(number_of_datapoints))
        parameter_file.write(
            "limit_data_percent,{}\n".format(limit_data_percent))
        parameter_file.write("numpy_seed,{}\n".format(numpy_seed))
        parameter_file.write("num_blocks,{}\n".format(num_blocks))
        parameter_file.write("nr_epochs,{}\n".format(nr_epochs))

    with open("experiments/{}/validation.csv".format(experiment_name),
              mode="w") as val_file:
        val_writer = csv.writer(val_file,
                                delimiter=',',
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
        val_writer.writerow(['iteration', 'pckh_0.5', 'pckh_0.2'])

    with open('experiments/{}/loss.csv'.format(experiment_name),
              mode='w') as output_file:
        writer = csv.writer(output_file,
                            delimiter=',',
                            quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['epoch', 'batch_nr', 'iteration', 'loss'])

        debugging = False
        iteration = 0
        for epoch in range(nr_epochs):

            model.train()
            for batch_idx, train_objects in enumerate(train_loader):

                images = train_objects["normalized_image"].to(device)
                poses = train_objects["normalized_pose"].to(device)

                heatmaps, output = model(images)

                output = output.permute(1, 0, 2, 3)
                # output shape: (batch_size, num_blocks, 16, 3)
                pred_pose = output[:, :, :, 0:2]
                ground_pose = poses[:, :, 0:2]
                ground_pose = ground_pose.unsqueeze(1)
                ground_pose = ground_pose.expand(-1, num_blocks, -1, -1)

                if debugging and iteration > 50:
                    # show prediction for first in batch
                    plt.subplot(221)
                    image = (images[0].permute(1, 2, 0) + 1) / 2.0  # CHW -> HWC for plotting
                    plt.imshow(image)

                    pred_detach = pred_pose.detach().numpy()

                    plt.subplot(222)
                    plt.imshow(image)
                    plt.scatter(x=pred_detach[0, -1, :, 0] * 255.0,
                                y=pred_detach[0, -1, :, 1] * 255.0,
                                c="g")
                    plt.scatter(x=ground_pose[0, -1, :, 0] * 255.0,
                                y=ground_pose[0, -1, :, 1] * 255.0,
                                c="r")

                    heatmaps_detached = heatmaps.detach().numpy()
                    plt.subplot(223)
                    # heatmap left wrist
                    heatmap_lr = resize(heatmaps_detached[0, -1], (256, 256))
                    plt.imshow(image)
                    plt.imshow(heatmap_lr, alpha=0.5)
                    plt.scatter(x=pred_detach[0, -1, -1, 0] * 255.0,
                                y=pred_detach[0, -1, -1, 1] * 255.0,
                                c="#000000")

                    plt.subplot(224)
                    # heatmap head top
                    heatmap_head_top = resize(heatmaps_detached[0, 9],
                                              (256, 256))
                    plt.imshow(image)
                    plt.imshow(heatmap_head_top, alpha=0.5)
                    plt.scatter(x=pred_detach[0, -1, 9, 0] * 255.0,
                                y=pred_detach[0, -1, 9, 1] * 255.0,
                                c="#000000")

                    plt.show()

                pred_vis = output[:, :, :, 2]
                ground_vis = poses[:, :, 2]
                ground_vis = ground_vis.unsqueeze(1)
                ground_vis = ground_vis.expand(-1, num_blocks, -1)

                binary_crossentropy = nn.BCELoss()

                vis_loss = binary_crossentropy(pred_vis, ground_vis)

                pose_loss = elastic_net_loss_paper(pred_pose, ground_pose)
                loss = vis_loss * 0.01 + pose_loss

                loss.backward()

                optimizer.step()
                optimizer.zero_grad()

                iteration = iteration + 1

                print("iteration {} loss {}".format(iteration, loss.item()))
                writer.writerow([epoch, batch_idx, iteration, loss.item()])
                output_file.flush()

                if iteration % 500 == 0:
                    # evaluate

                    val_accuracy_05 = []
                    val_accuracy_02 = []

                    model.eval()

                    if not exists('experiments/{}/val_images'.format(
                            experiment_name)):
                        makedirs('experiments/{}/val_images'.format(
                            experiment_name))

                    with torch.no_grad():
                        if not exists('experiments/{}/heatmaps/{}'.format(
                                experiment_name, iteration)):
                            makedirs('experiments/{}/heatmaps/{}'.format(
                                experiment_name, iteration))
                        else:
                            rmtree('experiments/{}/heatmaps/{}'.format(
                                experiment_name, iteration))
                            makedirs('experiments/{}/heatmaps/{}'.format(
                                experiment_name, iteration))

                        if not exists('experiments/{}/val_images/{}'.format(
                                experiment_name, iteration)):
                            makedirs('experiments/{}/val_images/{}'.format(
                                experiment_name, iteration))
                        else:
                            rmtree('experiments/{}/val_images/{}'.format(
                                experiment_name, iteration))
                            makedirs('experiments/{}/val_images/{}'.format(
                                experiment_name, iteration))

                        for batch_idx, val_data in enumerate(val_loader):
                            val_images = val_data["normalized_image"].to(
                                device)

                            heatmaps, predictions = model(val_images)
                            predictions = predictions[-1, :, :, :].squeeze(
                                dim=0)

                            if predictions.dim() == 2:
                                predictions = predictions.unsqueeze(0)

                            image_number = "{}".format(
                                int(val_data["image_path"][0].item()))
                            image_name = "{}.jpg".format(image_number.zfill(9))
                            image = io.imread(
                                "/data/mjakobs/data/mpii/images/{}".format(
                                    image_name))

                            if batch_idx % 10 == 0:
                                #visualize_heatmaps(heatmaps[0], val_images[0], 'experiments/{}/heatmaps/{}/{}_hm.png'.format(experiment_name, iteration, batch_idx), save=True)
                                show_predictions_ontop(
                                    val_data["normalized_pose"][0],
                                    image,
                                    predictions[0],
                                    'experiments/{}/val_images/{}/{}.png'.
                                    format(experiment_name, iteration,
                                           batch_idx),
                                    val_data["trans_matrix"][0],
                                    bbox=val_data["bbox"][0],
                                    save=True)

                            scores_05, scores_02 = eval_pckh_batch(
                                predictions, val_data["normalized_pose"],
                                val_data["head_size"],
                                val_data["trans_matrix"])
                            val_accuracy_05.extend(scores_05)
                            val_accuracy_02.extend(scores_02)

                        mean_05 = np.mean(np.array(val_accuracy_05))
                        mean_02 = np.mean(np.array(val_accuracy_02))

                        #print([iteration, loss.item(), mean_05, mean_02])

                        with open("experiments/{}/validation.csv".format(
                                experiment_name),
                                  mode="a") as val_file:
                            val_writer = csv.writer(val_file,
                                                    delimiter=',',
                                                    quotechar='"',
                                                    quoting=csv.QUOTE_MINIMAL)
                            val_writer.writerow([iteration, mean_05, mean_02])

                    # return to training mode after the validation pass
                    model.train()
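The conf dictionary has to provide every key read at the top of run_experiment_mpii; an illustrative configuration (the concrete values are placeholders, not the settings used in the original experiments):

# illustrative configuration; keys match run_experiment_mpii, values are placeholders
conf = {
    "learning_rate": 2.5e-4,
    "nr_epochs": 10,
    "validation_amount": 0.1,
    "limit_data_percent": 1.0,
    "numpy_seed": 1,
    "num_blocks": 4,
    "name": "mpii_4_blocks",
    "batch_size": 16,
    "val_batch_size": 16,
    "use_saved_tensors": True,
    "nr_context": 2,
    "project_dir": "",
}
run_experiment_mpii(conf)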
Example #7
def plot_mpii_augmented():
    ds = MPIIDataset("/data/mjakobs/data/mpii/",
                     train=True,
                     val=False,
                     use_random_parameters=False,
                     use_saved_tensors=False)
    ds_augmented = MPIIDataset("/data/mjakobs/data/mpii/",
                               train=True,
                               val=False,
                               use_random_parameters=True,
                               use_saved_tensors=False)

    ds_augmented.angles = np.array([-15])
    ds_augmented.scales = np.array([1.3])
    ds_augmented.flip_horizontal = np.array([1])

    entry = ds[20]
    image = entry["normalized_image"]
    image = image.permute(1, 2, 0)
    image = (image + 1) / 2.0

    plt.subplot(1, 4, 3)
    plt.imshow(image)

    pose = entry["normalized_pose"]
    scaled_pose = pose * 255.0

    # helper: draw the MPII skeleton for a pose already scaled to image coordinates
    def draw_pose(scaled_pose):
        for src, dst in joint_mapping:
            plt.plot([scaled_pose[src][0], scaled_pose[dst][0]],
                     [scaled_pose[src][1], scaled_pose[dst][1]],
                     lw=1,
                     c="#00FFFF")
            plt.scatter(scaled_pose[src][0],
                        scaled_pose[src][1],
                        s=10,
                        c="#FF00FF")
            plt.scatter(scaled_pose[dst][0],
                        scaled_pose[dst][1],
                        s=10,
                        c="#FF00FF")

    draw_pose(scaled_pose)

    plt.xticks([])
    plt.yticks([])

    image_number = "{}".format(int(entry["image_path"][0].item()))
    image_name = "{}.jpg".format(image_number.zfill(9))
    image = io.imread("/data/mjakobs/data/mpii/images/{}".format(image_name))
    plt.subplot(1, 4, 1)
    plt.imshow(image)
    plt.xticks([])
    plt.yticks([])

    plt.subplot(1, 4, 2)
    plt.imshow(image)
    matrix = entry["trans_matrix"]
    pose = transform_pose(matrix,
                          entry["normalized_pose"][:, 0:2],
                          inverse=True)
    scaled_pose = pose

    draw_pose(scaled_pose)
    plt.xticks([])
    plt.yticks([])

    entry = ds_augmented[20]
    image = entry["normalized_image"]
    image = image.permute(1, 2, 0)
    image = (image + 1) / 2.0

    plt.subplot(1, 4, 4)
    plt.imshow(image)
    pose = entry["normalized_pose"]
    scaled_pose = pose * 255.0

    draw_pose(scaled_pose)

    plt.xticks([])
    plt.yticks([])

    plt.show()
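joint_mapping comes from the repository and is not shown in these excerpts. For the 16-joint MPII order it is a list of (source, destination) joint-index pairs along the limbs; a sketch of what it could look like (an assumption, not the repository's definition):

# assumption: skeleton edges over the 16 MPII joints; not the repository's definition
joint_mapping = [
    (0, 1), (1, 2), (2, 6),       # right leg up to the pelvis
    (5, 4), (4, 3), (3, 6),       # left leg up to the pelvis
    (6, 7), (7, 8), (8, 9),       # pelvis - thorax - upper neck - head top
    (10, 11), (11, 12), (12, 7),  # right arm to the thorax
    (15, 14), (14, 13), (13, 7),  # left arm to the thorax
]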
Example #8
    os.makedirs(output_folder + "/" + path)

random.seed(1)
np.random.seed(1)
torch.manual_seed(1)

for scenario_idx, scenario in enumerate(scenarios):

    train = scenario[0]
    val = scenario[1]
    use_random = scenario[2]
    saved = scenario[3]

    ds = MPIIDataset("/data/mjakobs/data/mpii/",
                     train=train,
                     val=val,
                     use_random_parameters=use_random,
                     use_saved_tensors=saved)

    all_indices = list(range(len(ds)))
    random.seed(1)
    random.shuffle(all_indices)
    test_indices = all_indices[:2]

    print("train {} val {} random {} saved {}".format(train, val, use_random,
                                                      saved))

    for idx in test_indices:

        entry = ds[idx]
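The scenarios list is defined outside this excerpt; from the way it is unpacked, each entry is a (train, val, use_random, saved) tuple. Hypothetical values for illustration:

# hypothetical: each scenario is (train, val, use_random_parameters, use_saved_tensors)
scenarios = [
    (True, False, False, False),
    (True, False, True, True),
    (True, True, False, True),
]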