Example #1
def gen_data(POSE_OFFSET, PARAMS_TO_OFFSET, smpl, data_samples=10000, save_dir=None, render_silhouette=True):
    """ Generate random body poses """
    POSE_OFFSET = format_distractor_dict(POSE_OFFSET, PARAMS_TO_OFFSET)

    zero_params = np.zeros(shape=(85,))
    zero_pc = smpl.set_params(beta=zero_params[72:82], pose=zero_params[0:72].reshape((24,3)), trans=zero_params[82:85])
    #print("zero_pc: " + str(zero_pc))

    # Generate and format the data
    X_indices = np.array([i for i in range(data_samples)])
    X_params = np.array([zero_params for i in range(data_samples)], dtype="float32")
    if not all(value == 0.0 for value in POSE_OFFSET.values()):
        X_params = offset_params(X_params, PARAMS_TO_OFFSET, POSE_OFFSET)
        X_pcs = np.array([smpl.set_params(beta=params[72:82], pose=params[0:72].reshape((24, 3)), trans=params[82:85]) for params in X_params])
    else:
        X_pcs = np.array([zero_pc for i in range(data_samples)], dtype="float32")

    if render_silhouette:
        X_silh = []
        print("Generating silhouettes...")
        for pc in tqdm(X_pcs):
            # Render the silhouette from the point cloud
            silh = Mesh(pointcloud=pc).render_silhouette(show=False)
            X_silh.append(silh)

        X_silh = np.array(X_silh)
        print("Finished generating data.")

    if save_dir is not None:
        # Save the generated data in the given location
        print("Saving generated samples...")
        for i in tqdm(range(data_samples)):
            sample_id = "sample_{:05d}".format(i+1)
            if render_silhouette:
                np.savez(save_dir + sample_id + ".npz", smpl_params=X_params[i], pointcloud=X_pcs[i], silhouette=X_silh[i])
            else:
                np.savez(save_dir + sample_id + ".npz", smpl_params=X_params[i], pointcloud=X_pcs[i])

        print("Finished saving.")

    if render_silhouette:
        return X_params, X_pcs, X_silh
    else:
        return X_params, X_pcs
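
A hedged usage sketch for gen_data follows; the SMPL model path, offset keys and magnitudes are placeholders, and SMPLModel/format_distractor_dict are assumed to be the helpers imported alongside this function.

# Illustrative call only; paths and offset values are assumptions.
smpl = SMPLModel("./basicModel_f_lbs_10_207_0_v1.0.0.pkl")
PARAMS_TO_OFFSET = ["param_01", "param_02"]
POSE_OFFSET = {param: 0.2 for param in PARAMS_TO_OFFSET}

X_params, X_pcs, X_silh = gen_data(POSE_OFFSET, PARAMS_TO_OFFSET, smpl,
                                   data_samples=100,
                                   save_dir="./data/train/",  # keep the trailing slash; paths are concatenated
                                   render_silhouette=True)
print(X_params.shape, X_pcs.shape, X_silh.shape)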
Example #2
def gather_cb_data(X_data, Y_data, data_samples, num_cb_samples=5, where="spread"):
    """ Gather data for callbacks """
    if where == "spread":
        cb_samples = np.linspace(0, data_samples, num_cb_samples, dtype=int)
        cb_samples[-1] -= 1
    elif where == "front":
        cb_samples = [i for i in range(num_cb_samples)]
    elif where == "back":
        cb_samples = [i for i in range(data_samples - num_cb_samples, data_samples)]
    print("samples for visualisation callback: " + str(cb_samples))
    X_cb = [entry[cb_samples] for entry in X_data]
    Y_cb = [entry[cb_samples] for entry in Y_data]
    cb_pcs = X_cb[2]
    silh_cb = []
    for pc in cb_pcs:
        silh = Mesh(pointcloud=pc).render_silhouette(show=False)
        silh_cb.append(silh)

    return X_cb, Y_cb, silh_cb
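
For context, a minimal sketch of how gather_cb_data is called; the X_data layout [indices, params, point clouds] mirrors the construction used in the later examples and is otherwise an assumption.

# Illustrative call; X_indices, X_params, X_pcs and ARCHITECTURE come from the surrounding set-up code.
X_data = [np.array(X_indices), np.array(X_params), np.array(X_pcs)]
Y_data = architecture_output_array(ARCHITECTURE, data_samples)

X_cb, Y_cb, silh_cb = gather_cb_data(X_data, Y_data, data_samples,
                                     num_cb_samples=5, where="spread")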

# Render silhouettes for the callback data
num_cb_samples = 5
cb_samples = np.linspace(0, data_samples, num_cb_samples, dtype=int)
cb_samples[-1] -= 1
X_cb = [entry[cb_samples] for entry in X_data]
Y_cb = [entry[cb_samples] for entry in Y_data]
cb_pcs = X_cb[2]
silh_cb = []
for pc in cb_pcs:
    silh = Mesh(pointcloud=pc).render_silhouette(show=False)
    silh_cb.append(silh)

# Validation data
#num_val_samples = 100
#val_samples = np.linspace(0, data_samples-1, num_val_samples, dtype=int)
#X_val = [entry[val_samples] for entry in X_data]
#Y_val = [entry[val_samples] for entry in Y_data]
""" Model set-up """

# Model setup parameters - ARCHITECTURE and BATCH_SIZE already defined above
EPOCHS = setup_params["MODEL"]["EPOCHS"]
INPUT_TYPE = setup_params["MODEL"]["INPUT_TYPE"]
learning_rate = setup_params["MODEL"]["LEARNING_RATE"]
DELTA_D_LOSS_WEIGHT = setup_params["MODEL"]["DELTA_D_LOSS_WEIGHT"]
PC_LOSS_WEIGHT = setup_params["MODEL"]["PC_LOSS_WEIGHT"]
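
For reference, a minimal sketch of the setup_params structure these look-ups assume, treating it as a parsed JSON config; the file name and example values are placeholders, and only the MODEL keys read above are taken from the code.

import json

with open("setup_params.json", "r") as f:  # file name is an assumption
    setup_params = json.load(f)

# Expected shape (values illustrative):
# setup_params["MODEL"] = {
#     "EPOCHS": 100,
#     "INPUT_TYPE": "3D_POINTS",
#     "LEARNING_RATE": 1e-3,
#     "DELTA_D_LOSS_WEIGHT": 1.0,
#     "PC_LOSS_WEIGHT": 1.0,
# }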
Example #4
X_data = [np.array(X_indices), np.array(X_params), np.array(X_pcs)]
Y_data = architecture_output_array(ARCHITECTURE, data_samples)

x_test = X_data
y_test = Y_data

# Render silhouettes for the callback data
num_samples = 5
cb_indices = X_indices[:num_samples]
cb_params = X_params[:num_samples]
cb_pcs = X_pcs[:num_samples]
X_cb = [np.array(cb_indices), np.array(cb_params), np.array(cb_pcs)]
silh_cb = []
for pc in cb_pcs:
    silh = Mesh(pointcloud=pc).render_silhouette(show=False)
    silh_cb.append(silh)

# Initialise the embedding layer
def emb_init_weights_np(emb_params, distractor=np.pi):
    def emb_init_wrapper(param, offset=False):
        def emb_init(shape, dtype="float32"):
            """ Initializer for the embedding layer """
            emb_params_ = emb_params[:, param]

            if offset:
                # distractor may be a scalar magnitude or a per-parameter dictionary
                if isinstance(distractor, dict):
                    k = distractor["param_{:02d}".format(param)]
                else:
                    k = distractor
                offset_ = k * 2 * (np.random.rand(shape[0]) - 0.5)
                emb_params_[:] += offset_

            init = np.array(emb_params_, dtype=dtype).reshape(shape)
            return init
        return emb_init
    return emb_init_wrapper
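
A short sketch of how this nested initialiser factory is consumed: each call to the wrapper yields a per-parameter initialiser whose output can seed one embedding column. The shapes below follow the (data_samples, 85) parameter matrix used throughout these examples; everything else is illustrative.

# X_params: (data_samples, 85) ground-truth SMPL parameters, as generated above
emb_initialiser = emb_init_weights_np(X_params, distractor=np.pi)

# Build the initialiser for parameter 0 and evaluate it at the embedding shape
init_fn = emb_initialiser(param=0, offset=False)
initial_weights = init_fn(shape=(X_params.shape[0], 1))
print(initial_weights.shape)  # (data_samples, 1)

# In the surrounding models these callables are presumably passed, one per SMPL
# parameter, as the embeddings_initializer of per-parameter Embedding layers.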
Example #5
# Load model
# encoder = Encoder()
#encoder.train_step(random_sample.reshape((1, *random_sample.shape, 1)), parameters.reshape(1, *parameters.shape))
# encoder.load_weights(args.model)
encoder_inputs, encoder_outputs = SimpleEncoderArchitecture((256, 256, 1))
encoder = Model(inputs=encoder_inputs, outputs=encoder_outputs)
encoder.summary()

encoder.load_weights(einfo['model_weights_path'])
# Predict the parameters from the silhouette and generate a mesh
prediction = encoder.predict(silhouette.reshape(1, 256, 256, 1))
prediction = tf.cast(prediction, tf.float64)
print("Shape of predictions:'" + str(prediction.shape))
print(prediction[0, 82:85])

# Betas are overridden with ones and the translation zeroed before meshing
pred_pc, faces = smpl_model("../model.pkl", prediction[0, 72:82] * 0 + 1,
                            prediction[0, :72], prediction[0, 82:85] * 0)
# Render the mesh
pred_mesh = Mesh(pointcloud=pred_pc.numpy())
pred_mesh.faces = faces

if mesh is not None:
    mesh.render3D()

print("Rendering prediction")
pred_mesh.render3D()

# Now render their silhouettes
# cv2.imshow("True silhouette", silhouette)
# pred_mesh.render_silhouette(title="Predicted silhouette")
    def on_epoch_begin(self, epoch, logs=None):
        import cv2
        outputs = self.model.predict(self.data)
        #print(outputs)

        gt_params = self.data[1]
        pred_params = outputs[0]
        print("GT and predicted parameters are equal: " +
              str(np.allclose(gt_params, pred_params)))

        #print(gt_params)
        #print(pred_params)
        #exit(1)

        gt_pc = self.data[2]
        #print(gt_pc)
        pred_pc = outputs[2]
        right_pred = Mesh(pointcloud=pred_pc[0]).render_silhouette(show=False)
        cv2.imwrite("right_pred.png", right_pred.astype("uint8"))
        wrong_pred = Mesh(pointcloud=pred_pc[10]).render_silhouette(show=False)
        cv2.imwrite("wrong_pred.png", wrong_pred.astype("uint8"))
        print_mesh("wrong_mesh.obj", pred_pc[10], faces)
        print_mesh("right_mesh.obj", pred_pc[0], faces)
        #print(pred_pc)
        print("GT and predicted point clouds are equal: " +
              str(np.allclose(gt_pc, pred_pc)))
        close = np.array([
            int(np.allclose(gt_pc[i], pred_pc[i]))
            for i in range(gt_pc.shape[0])
        ])
        #print([np.allclose(gt_pc[i], pred_pc[i]) for i in range(gt_pc.shape[0])])
        not_close = np.array([not value for value in close])
        close_sum = np.sum(close)
        #print("Num close pc: " + str(close_sum))
        not_close_gt = gt_pc[not_close]
        not_close_pred = pred_pc[not_close]
        #print(not_close_pred[0])
        #exit(1)
        diff_not_close = not_close_gt - not_close_pred
        import pandas as pd
        diff_df = pd.DataFrame(diff_not_close[10])
        diff_df.to_csv("diff.csv")
        dist_not_close = np.sum(np.square(diff_not_close), axis=-1)
        dist_df = pd.DataFrame(dist_not_close[10])
        dist_df.to_csv("dist.csv")
        mean_not_close = np.mean(dist_not_close, axis=1)
        #print(mean_not_close)

        gt_example = not_close_gt[10]
        pred_example = not_close_pred[10]

        gt_silh = Mesh(pointcloud=gt_example).render_silhouette(show=False)
        pred_silh = Mesh(pointcloud=pred_example).render_silhouette(show=False)
        diff_silh = (gt_silh != pred_silh) * 255
        all_silh = np.concatenate([gt_silh, pred_silh, diff_silh])
        cv2.imwrite("silh_comp.png", all_silh.astype("uint8"))

        #exit(1)
        euc_dist = np.square(np.subtract(gt_pc, pred_pc))
        #print(euc_dist[0])
        euc_dist_summed = np.sum(euc_dist, axis=-1)
        #print(euc_dist_summed[0])
        actual_loss = np.mean(euc_dist_summed, axis=1)
        #print(actual_loss)
        actual_loss_sum = np.sum(actual_loss, axis=0)
        #print(actual_loss_sum)

        mean_loss = np.mean(actual_loss)
        print("Calculated mean loss: {}".format(mean_loss))
        print("Model mean loss: {}".format(np.mean(outputs[3])))
    def __getitem__(self, item):
        """ Yield batches of data """
        # Load the SMPL model
        #smpl = SMPLModel('./keras_rotationnet_v2_demo_for_hidde/./basicModel_f_lbs_10_207_0_v1.0.0.pkl')
        smpl = self.smpl

        if self.debug:
            if self.debug_X is None:
                Y_batch_params = []
                Y_batch_pc = []
                X_batch = []
                for i in range(self.batch_size):
                    # Generate artificial data
                    pose = 0.65 * (np.random.rand(smpl.pose_shape[0], smpl.pose_shape[1]) - 0.5)
                    beta = 0.2 * (np.random.rand(smpl.beta_shape[0]) - 0.5)
                    trans = np.zeros(smpl.trans_shape[0])
                    #trans = 0.1 * (np.random.rand(smpl.trans_shape[0]) - 0.5)

                    # Create the body mesh
                    pointcloud = smpl.set_params(beta=beta, pose=pose, trans=trans)
                    Y_batch_params.append(np.concatenate([pose.ravel(), beta, trans]))
                    Y_batch_pc.append(pointcloud)

                    # Render the silhouette
                    silhouette = Mesh(pointcloud=pointcloud).render_silhouette(dim=self.img_dim, show=False)
                    X_batch.append(np.array(silhouette))

                # Preprocess the batches and yield them
                Y_batch = [np.array(Y_batch_params), np.array(Y_batch_pc)]
                X_batch = np.array(X_batch, dtype="float32")
                X_batch /= 255
                X_batch = X_batch.reshape((X_batch.shape[0], X_batch.shape[1], X_batch.shape[2], 1))

                self.debug_X = X_batch
                self.debug_Y = Y_batch

            else:
                X_batch = self.debug_X
                Y_batch = self.debug_Y

        else:
            # Split between randomly generated and real data
            num_artificial = int(np.round(self.frac_randomised * self.batch_size))
            num_real = int(self.batch_size - num_artificial)

            # Retrieve a random batch of parameters from the data directory
            if num_real > 0:
                data = np.array(os.listdir(self.data_dir))
                Y_batch_ids = data[np.random.randint(low=0, high=data.shape[0], size=num_real)]
            else:
                Y_batch_ids = []

            Y_batch_params = []
            Y_batch_pc = []
            X_batch = []
            for Y_id in Y_batch_ids:
                # Fetch the real data
                Y = np.load(os.path.join(self.data_dir, Y_id))

                # Add a small amount of noise to the data
                Y += np.random.uniform(low=-self.noise, high=self.noise, size=Y.shape)
                Y_batch_params.append(Y)

                # Now generate the silhouette from the SMPL meshes
                # Create the body mesh
                pose = Y[:72]
                beta = Y[72:82]
                trans = Y[82:]
                pointcloud = smpl.set_params(beta=beta, pose=pose.reshape((24, 3)), trans=trans)

                # Render the silhouette
                silhouette = Mesh(pointcloud=pointcloud).render_silhouette(dim=self.img_dim, show=False)
                X_batch.append(np.array(silhouette))

            for i in range(num_artificial):
                # Generate artificial data
                pose = 0.65 * (np.random.rand(smpl.pose_shape[0], smpl.pose_shape[1]) - 0.5)
                beta = 0.2 * (np.random.rand(smpl.beta_shape[0]) - 0.5)
                trans = np.zeros(smpl.trans_shape[0])
                #trans = 0.1 * (np.random.rand(smpl.trans_shape[0]) - 0.5)

                # Create the body mesh
                pointcloud = smpl.set_params(beta=beta, pose=pose, trans=trans)
                Y_batch_params.append(np.concatenate([pose.ravel(), beta, trans]))
                Y_batch_pc.append(pointcloud)

                # Render the silhouette
                silhouette = Mesh(pointcloud=pointcloud).render_silhouette(dim=self.img_dim, show=False)
                X_batch.append(np.array(silhouette))

            # Preprocess the batches and yield them
            Y_batch = [np.array(Y_batch_params), np.array(Y_batch_pc)]
            X_batch = np.array(X_batch, dtype="float32")
            X_batch /= 255
            X_batch = X_batch.reshape((X_batch.shape[0], X_batch.shape[1], X_batch.shape[2], 1))

        #print("X_batch shape " + str(X_batch.shape))
        #print("Y_batch shape " + str(Y_batch.shape))
        #X_batch = list(X_batch)

        return X_batch, Y_batch
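
The __getitem__ above reads several attributes from self; a minimal sketch of the surrounding generator class it assumes is given below. Only the attribute names are taken from the method body; the class name, defaults and __len__ definition are assumptions.

from tensorflow.keras.utils import Sequence

class SilhouetteDataGenerator(Sequence):  # hypothetical wrapper class
    """ Minimal surrounding class assumed by the __getitem__ above (sketch only) """
    def __init__(self, smpl, data_dir, batch_size=16, img_dim=(256, 256),
                 frac_randomised=1.0, noise=0.01, debug=False,
                 batches_per_epoch=100):
        self.smpl = smpl
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.img_dim = img_dim
        self.frac_randomised = frac_randomised
        self.noise = noise
        self.debug = debug
        self.debug_X = None
        self.debug_Y = None
        self.batches_per_epoch = batches_per_epoch

    def __len__(self):
        # Number of batches generated per epoch (illustrative choice)
        return self.batches_per_epoch

    # __getitem__ is defined as above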
Example #8
def gather_input_data(data_samples,
                      smpl,
                      PARAMS_TO_OFFSET,
                      POSE_OFFSET,
                      ARCHITECTURE,
                      param_trainable,
                      num_test_samples=5,
                      MODE="RODRIGUES",
                      LOAD_DATA_DIR=None,
                      kin_tree=[],
                      dist="uniform"):
    # Prepare initial input data
    zero_params = np.zeros(shape=(85, ))
    zero_pc = smpl.set_params(beta=zero_params[72:82],
                              pose=zero_params[0:72].reshape((24, 3)),
                              trans=zero_params[82:85])

    X_indices = np.array([i for i in range(data_samples)])
    zero_params = np.array([zero_params for i in range(data_samples)],
                           dtype="float32")

    if LOAD_DATA_DIR is not None:
        # Load data from existing directory
        all_X_params, all_X_pcs = load_data(LOAD_DATA_DIR,
                                            num_samples=data_samples,
                                            load_silhouettes=False)
    else:
        # Generate the data
        if not all(value == 0.0 for value in POSE_OFFSET.values()):
            print("Offsetting parameters...")
            all_params = offset_params(zero_params,
                                       PARAMS_TO_OFFSET,
                                       POSE_OFFSET,
                                       dist=dist)
            if num_test_samples > 0:
                assert data_samples >= num_test_samples
                X_params = all_params[:num_test_samples]
            else:
                X_params = all_params
            print("X_params shape: " + str(X_params.shape))
            print("Rendering parameters...")
            X_pcs = np.array([
                np.array(
                    smpl.set_params(beta=params[72:82],
                                    pose=params[0:72].reshape((24, 3)),
                                    trans=params[82:85]).copy())
                for params in X_params
            ])
            print("X_pcs shape: " + str(X_pcs.shape))

            all_X_pcs = np.zeros((data_samples, 6890, 3))
            all_X_params = np.zeros((data_samples, 85))
            if num_test_samples > 0:
                all_X_pcs[:num_test_samples] = X_pcs
                print("all_X_pcs shape: " + str(all_X_pcs.shape))

                all_X_params[:num_test_samples] = X_params
                print("all_X_params shape: " + str(all_X_params.shape))
        else:
            zero_params = np.zeros(shape=(85, ))
            zero_pc = smpl.set_params(beta=zero_params[72:82],
                                      pose=zero_params[0:72].reshape((24, 3)),
                                      trans=zero_params[82:85])
            #print("zero_pc: " + str(zero_pc))
            all_X_params = np.array([zero_params for i in range(data_samples)],
                                    dtype="float32")
            all_X_pcs = np.array([zero_pc for i in range(data_samples)],
                                 dtype="float32")

    if MODE == "EULER":
        # Convert from Rodrigues to Euler angles
        all_X_params = rodrigues_to_euler(all_X_params, smpl)

    all_X_silhs = []
    for pc in all_X_pcs:
        silh = Mesh(pointcloud=pc).render_silhouette(dim=[128, 128],
                                                     show=False)
        all_X_silhs.append(silh)

    X_data = [
        np.array(X_indices),
        np.array(all_X_params),
        np.array(all_X_pcs),
        np.array(all_X_silhs)
    ]
    Y_data = architecture_output_array(ARCHITECTURE, data_samples)

    if ARCHITECTURE == "NewDeepConv1DOptLearnerArchitecture":
        trainable_params_mask = [
            int(param_trainable[key])
            for key in sorted(param_trainable.keys(),
                              key=lambda x: int(x[6:8]))
        ]
        #print(trainable_params_mask)
        trainable_params_mask = np.tile(trainable_params_mask,
                                        (data_samples, 1))
        print("trainable_params_mask shape: " +
              str(trainable_params_mask.shape))
        X_data += [trainable_params_mask]

    if ARCHITECTURE == "PeriodicOptLearnerArchitecture":
        new_kin_tree = []
        for level in kin_tree:
            level_params = []
            for param in level:
                level_params.append(param.replace("param_", ""))
            new_kin_tree.append(level_params)

        params_to_train = [1 for _ in range(85)]
        params_to_train = np.tile(params_to_train, (data_samples, 1))
        print("params_to_train shape: " + str(params_to_train.shape))
        X_data += [params_to_train]

    return X_data, Y_data
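
An illustrative call to gather_input_data; the architecture name mirrors the one handled explicitly above, while the chosen parameters, offset size and sample counts are placeholders.

# param_trainable follows the "param_NN" key convention used above.
PARAMS_TO_OFFSET = ["param_01", "param_59"]
POSE_OFFSET = {param: 0.3 for param in PARAMS_TO_OFFSET}
param_trainable = {"param_{:02d}".format(i): ("param_{:02d}".format(i) in PARAMS_TO_OFFSET)
                   for i in range(85)}

X_data, Y_data = gather_input_data(data_samples=1000,
                                   smpl=smpl,
                                   PARAMS_TO_OFFSET=PARAMS_TO_OFFSET,
                                   POSE_OFFSET=POSE_OFFSET,
                                   ARCHITECTURE="NewDeepConv1DOptLearnerArchitecture",
                                   param_trainable=param_trainable,
                                   num_test_samples=5)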
    def on_epoch_end(self, epoch, logs=None):
        """ Store the model loss and accuracy at the end of every epoch, and store a model prediction on data """
        #print("Callback called at epoch " + str(epoch))
        epoch = int(epoch)
        if logs is not None:
            self.epoch_log.write(
                json.dumps({'epoch': epoch})[:-1] + ", " +
                json.dumps(logs)[1:] + '\n')

        if (epoch + 1) % self.period == 0 or epoch == 0 or epoch == -1:
            # Predict on all of the given input parameters
            for data_type, data in self.input_data.items():
                if data[0][0] is not None:
                    print("Saving to directory: \n{}\n".format(self.pred_path))
                    # Predict on these input parameters
                    #print("data value: " + str(data))
                    data_dict = {
                        "embedding_index": np.array(data[0]),
                        "gt_params": np.array(data[1]),
                        "gt_pc": np.array(data[2])
                    }

                    preds = self.model.predict(
                        data_dict)  #, batch_size=len(data[0]))

                    print(str(data_type))
                    print("------------------------------------")

                    metrics_names = self.model.metrics_names[:-2]
                    output_names = [
                        metric[:-5] for i, metric in enumerate(metrics_names)
                        if i > 0
                    ]
                    preds_dict = {
                        output_name: preds[i]
                        for i, output_name in enumerate(output_names)
                    }
                    #print(preds_dict)
                    #exit(1)

                    self.delta_d_log.write('epoch {:05d}\n'.format(epoch + 1))
                    param_diff_sines = np.abs(
                        np.sin(0.5 * (data[1] - preds_dict["learned_params"])))
                    delta_d_diff_sines = np.abs(
                        np.sin(0.5 * (preds_dict["delta_d"] -
                                      preds_dict["delta_d_hat"])))
                    trainable_diff_sines = []
                    for parameter in self.trainable_params:
                        param_int = int(parameter[6:])
                        trainable_diff_sines.append(
                            param_diff_sines[:, param_int])
                        print("Parameter: " + str(parameter))
                        print("GT SMPL: " + str(data[1][:, param_int]))
                        print("Parameters: " +
                              str(preds_dict["learned_params"][:, param_int]))
                        print("Parameter ang. MAE: " +
                              str(param_diff_sines[:, param_int]))
                        print("Delta_d: " +
                              str(preds_dict["delta_d"][:, param_int]))
                        print("Delta_d_hat: " +
                              str(preds_dict["delta_d_hat"][:, param_int]))
                        print("Difference sine: " +
                              str(delta_d_diff_sines[:, param_int]))
                        #print("Delta_d_hat loss: " + str(preds_dict["delta_d_hat_mse"]))
                        #print("Difference sine (direct): " + str(np.sin(preds_dict["delta_d"] - preds_dict["delta_d_hat"])[:, param_int]))
                        #print("Difference sine (from normals): " + str(preds_dict["diff_angles"]))
                        print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")

                        #self.delta_d_log.write('parameter: ' + str(parameter) + '\n' + 'Delta_d: ' + str(preds[6][:, param_int]) + '\n')
                        self.delta_d_log.write('parameter: ' + str(parameter) +
                                               '\n' + 'Delta_d: ' +
                                               str(preds[7][:, param_int]) +
                                               '\n')

                    avg_diff_sines = np.mean(trainable_diff_sines, axis=0)

                    # Track resets
                    BLOCK_SIZE = self.data_samples / self.RESET_PERIOD
                    #print("BLOCK_SIZE " + str(BLOCK_SIZE))
                    BLOCKS = self.examples // BLOCK_SIZE
                    #print("BLOCKS " + str(BLOCKS))
                    if (epoch - 1) < 0 or self.testing:
                        was_reset = [False for _ in BLOCKS]
                    else:
                        INDEX = (epoch - 1) % self.RESET_PERIOD
                        #print("INDEX " + str(INDEX))
                        was_reset = [entry == INDEX for entry in BLOCKS]
                    #print("was_reset " + str(was_reset))
                    #exit(1)

                    silh_comp_list = []
                    for i, learned_pc in enumerate(preds[2], 1):
                        # Store the learned mesh
                        print_mesh(
                            os.path.join(
                                self.pred_path,
                                "{}_epoch.{:05d}.pred_pc_{:03d}.obj".format(
                                    data_type, epoch + 1, i)), learned_pc,
                            self.smpl.faces)

                        pred_silhouette = Mesh(
                            pointcloud=learned_pc).render_silhouette(
                                show=False)
                        #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.pred_silh_{:03d}.png".format(data_type, epoch + 1, i)), pred_silhouette)

                        if self.gt_silhouettes[data_type] is not None:
                            # Store predicted silhouette and the difference between it and the GT silhouette
                            #gt_silhouette = (self.gt_silhouettes[data_type][i-1] * 255).astype("uint8")
                            gt_silhouette = self.gt_silhouettes[data_type][
                                i - 1].astype("uint8")
                            #print("gt_silhouette shape: " + str(gt_silhouette.shape))
                            gt_silhouette = gt_silhouette.reshape(
                                (gt_silhouette.shape[0],
                                 gt_silhouette.shape[1]))
                            #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.gt_silh_{:03d}.png".format(data_type, epoch + 1, i)), gt_silhouette)

                            diff_silh = (gt_silhouette !=
                                         pred_silhouette) * 255
                            #diff_silh = abs(gt_silhouette - pred_silhouette)
                            #print(diff_silh.shape)
                            #cv2.imshow("Diff silh", diff_silh)
                            #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.diff_silh_{:03d}.png".format(data_type, epoch + 1, i)), diff_silh.astype("uint8"))
                            silh_comp = np.concatenate(
                                [gt_silhouette, pred_silhouette, diff_silh],
                                axis=1)
                            #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:05d}.silh_comp_{:03d}.png".format(data_type, epoch + 1, i)), silh_comp.astype("uint8"))

                            if was_reset[i - 1]:
                                # Grey the image (integer halving keeps the dtype valid)
                                silh_comp = silh_comp // 2

                            # Convert to rgb and write the difference sine to the image
                            silh_comp_rgb = np.zeros(
                                (silh_comp.shape[0], silh_comp.shape[1], 3))
                            for c in range(3):
                                silh_comp_rgb[:, :, c] = silh_comp

                            # Write to the image
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            bottomLeftCorner = (550, 30)
                            fontScale = 0.6
                            fontColor = (0, 0, 255)
                            lineType = 2
                            cv2.putText(
                                silh_comp_rgb, "Ang. MAE: {0:.3f}".format(
                                    avg_diff_sines[i - 1]), bottomLeftCorner,
                                font, fontScale, fontColor, lineType)

                            # Add image to list
                            silh_comp_list.append(silh_comp_rgb)

                        # Save the predicted point cloud relative to the GT point cloud
                        print_mesh(
                            os.path.join(
                                self.pred_path,
                                "{}_epoch.{:05d}.gt_pc_{:03d}.obj".format(
                                    data_type, epoch + 1, i)), data[2][i - 1],
                            self.smpl.faces)
                        print_point_clouds(
                            os.path.join(
                                self.pred_path,
                                "{}_epoch.{:05d}.comparison_{:03d}.obj".format(
                                    data_type, epoch + 1, i)),
                            [learned_pc, data[2][i - 1]], [(255, 0, 0),
                                                           (0, 255, 0)])

                    if len(silh_comp_list) > 0:
                        silh_comps_rgb = np.concatenate(silh_comp_list, axis=0)

                        font = cv2.FONT_HERSHEY_SIMPLEX
                        topLeftCorner = (30, 30)
                        fontScale = 1
                        fontColor = (0, 0, 255)
                        lineType = 2

                        if self.testing:
                            text = "Iteration "
                        else:
                            text = "Epoch "

                        cv2.putText(silh_comps_rgb, text + str(epoch + 1),
                                    topLeftCorner, font, fontScale, fontColor,
                                    lineType)
                        cv2.imwrite(
                            os.path.join(
                                self.pred_path,
                                "{}_epoch.{:05d}.silh_comps.png".format(
                                    data_type, epoch + 1)),
                            silh_comps_rgb.astype("uint8"))
Example #10
    def on_epoch_end(self, epoch, logs=None):
        """ Store the model loss and accuracy at the end of every epoch, and store a model prediction on data """
        self.epoch_log.write(
            json.dumps({
                'epoch': epoch,
                'loss': logs['loss']
            }) + '\n')

        if (epoch + 1) % self.period == 0 or epoch == 0:
            # Predict on all of the given silhouettes
            for data_type, data in self.pred_data.items():
                if data is not None:
                    if isinstance(data, np.ndarray) and data.ndim == 3:
                        # A single silhouette was passed in; add a batch dimension
                        data = data.reshape(
                            (1, data.shape[0], data.shape[1], data.shape[2]))

                    #for i, silhouette in enumerate(data):
                    #    # Save silhouettes
                    #    silhouette *= 255
                    #    cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.gt_silh_{:03d}.png".format(data_type, epoch + 1, i)), silhouette.astype("uint8"))

                    preds = self.model.predict(data)
                    #print("Predictions: " + str(preds))

                    for i, pred in enumerate(preds[1], 1):
                        #self.smpl.set_params(pred[:72].reshape((24, 3)), pred[72:82], pred[82:])
                        #self.smpl.save_to_obj(os.path.join(self.pred_path, "{}_pred_{:03d}.obj".format(data_type, i)))
                        #print_mesh(os.path.join(self.pred_path, "epoch.{:03d}.{}_gt_{:03d}.obj".format(epoch, data_type, i)), gt[i-1], smpl.faces)
                        print_mesh(
                            os.path.join(
                                self.pred_path,
                                "{}_epoch.{:03d}.pred_{:03d}.obj".format(
                                    data_type, epoch + 1, i)), pred,
                            self.smpl.faces)

                        # Store predicted silhouette and the difference between it and the GT silhouette
                        gt_silhouette = (data[i - 1] *
                                         255).astype("uint8").reshape(
                                             data.shape[1], data.shape[2])
                        #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.gt_silh_{:03d}.png".format(data_type, epoch + 1, i)), gt_silhouette)

                        pred_silhouette = Mesh(
                            pointcloud=pred).render_silhouette(show=False)
                        #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.pred_silh_{:03d}.png".format(data_type, epoch + 1, i)), pred_silhouette)

                        diff_silh = abs(gt_silhouette - pred_silhouette)
                        #print(diff_silh.shape)
                        #cv2.imshow("Diff silh", diff_silh)
                        #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.diff_silh_{:03d}.png".format(data_type, epoch + 1, i)), diff_silh.astype("uint8"))
                        silh_comp = np.concatenate(
                            [gt_silhouette, pred_silhouette, diff_silh])
                        cv2.imwrite(
                            os.path.join(
                                self.pred_path,
                                "{}_epoch.{:03d}.silh_comp_{:03d}.png".format(
                                    data_type, epoch + 1, i)),
                            silh_comp.astype("uint8"))

                        if self.gt_pc[data_type] is not None:
                            print_mesh(
                                os.path.join(
                                    self.pred_path,
                                    "{}_epoch.{:03d}.gt_pc_{:03d}.obj".format(
                                        data_type, epoch + 1, i)),
                                self.gt_pc[data_type], self.smpl.faces)
                            print_point_clouds(
                                os.path.join(
                                    self.pred_path,
                                    "{}_epoch.{:03d}.comparison_{:03d}.obj".
                                    format(data_type, epoch + 1, i)),
                                [pred, self.gt_pc[data_type]], [(255, 0, 0),
                                                                (0, 255, 0)])

                    if self.visualise:
                        # Show a random sample
                        rand_index = np.random.randint(low=0,
                                                       high=len(data)) + 1
                        mesh = Mesh(filepath=os.path.join(
                            self.pred_path, "{}_epoch.{:03d}.pred_{:03d}.obj".
                            format(data_type, epoch + 1, rand_index)))

                        # Show the true silhouette
                        true_silh = data[rand_index - 1]
                        true_silh = true_silh.reshape(true_silh.shape[:-1])
                        plt.imshow(true_silh, cmap='gray')
                        plt.title("True {} silhouette {:03d}".format(
                            data_type, rand_index))
                        plt.show()

                        # Show the predicted silhouette and mesh
                        mesh.render_silhouette(
                            title="Predicted {} silhouette {:03d}".format(
                                data_type, rand_index))
                        diff_silh = cv2.imread(
                            "{}_epoch.{:03d}.diff_silh_{:03d}.png".format(
                                data_type, epoch + 1, rand_index))
                        cv2.imshow(
                            "Predicted {} silhouette {:03d}".format(
                                data_type, rand_index), diff_silh)

                        try:
                            mesh.render3D()
                        except Exception:
                            pass
Example #11
    def on_epoch_end(self, epoch, logs=None):
        """ Store the model loss and accuracy at the end of every epoch, and store a model prediction on data """
        #print("Callback called at epoch " + str(epoch))
        epoch = int(epoch)
        if logs is not None:
            self.epoch_log.write(json.dumps({'epoch': epoch})[:-1] + ", " + json.dumps(logs)[1:] + '\n')

        #if (epoch + 1) % self.period == 0 or epoch == 0 or epoch == -1:
        if epoch % self.period == 0 or epoch == -1:
            # Predict on all of the given input parameters
            for data_type, data in self.input_data.items():
                if data[0][0] is not None or self.generator_paths[data_type] is not None:
                    print("Saving to directory: \n{}\n".format(self.pred_path))
                    # Predict on these input parameters
                    #print("data value: " + str(data))
                    gen_path = self.generator_paths[data_type]
                    additional_input = None
                    if gen_path is not None:
                        gen_path = gen_path + "cb_samples_E{}.npz".format(epoch)
                        try:
                            with np.load(gen_path, allow_pickle=True) as temp_data:
                                print(temp_data.keys())
                                if "trainable_params" in temp_data.keys():
                                    data = [temp_data["indices"], temp_data["params"], temp_data["pcs"], temp_data["trainable_params"]]
                                    additional_input = "trainable_params"
                                elif "params_to_train" in temp_data.keys():
                                    data = [temp_data["indices"], temp_data["params"], temp_data["pcs"], temp_data["params_to_train"]]
                                    additional_input = "params_to_train"
                                else:
                                    data = [temp_data["indices"], temp_data["params"], temp_data["pcs"]]
                        except Exception as e:
                            print("Skipping - load failed with exception '{}'".format(e))
                            return None

                    data_dict = {"embedding_index": np.array(data[0]), "gt_params": np.array(data[1]), "gt_pc": np.array(data[2])}
                    if self.ARCHITECTURE == "PeriodicOptLearnerArchitecture":
                        additional_input = "params_to_train"
                    if self.ARCHITECTURE == "NewDeepConv1DOptLearnerArchitecture":
                        additional_input = "trainable_params"
                    if additional_input is not None:
                        data_dict[additional_input] = np.array(data[3])
                    preds = self.model.predict(data_dict) #, batch_size=len(data[0]))

                    print(str(data_type))
                    print("------------------------------------")

                    #metrics_names = self.model.metrics_names[:-2]
                    metrics_names = self.model.metrics_names[:-1]
                    #print(metrics_names)
                    output_names = [metric[:-5] for i, metric in enumerate(metrics_names) if i > 0]
                    preds_dict = {output_name: preds[i] for i, output_name in enumerate(output_names)}
                    #print(preds_dict)
                    #exit(1)

                    #print("GT SMPL for first example: " + str(data[1][0]))
                    #print("Diff for first example: " + str(data[1][0] - preds_dict["learned_params"][0]))

                    param_diff_sines = np.abs(np.sin(0.5*(data[1] - preds_dict["learned_params"])))
                    delta_d_diff_sines = np.abs(np.sin(0.5*(preds_dict["delta_d"] - preds_dict["delta_d_hat"])))
                    trainable_diff_sines = []
                    for i, parameter in enumerate(self.trainable_params):
                        param_int = int(parameter[6:8])
                        trainable_diff_sines.append(param_diff_sines[:, param_int])
                        print("Parameter: " + str(parameter))
                        print("GT SMPL: " + str(data[1][:, param_int]))
                        print("Parameters: " + str(preds_dict["learned_params"][:, param_int]))
                        print("Parameter ang. MAE: " + str(param_diff_sines[:, param_int]))
                        if "delta_d_hat_mu" in preds_dict.keys():
                            print("Delta_d_hat_mu: " + str(preds_dict["delta_d_hat_mu"][:, param_int]))   # ProbCNN architecture only
                            print("Delta_d_hat_sigma: " + str(preds_dict["delta_d_hat_sigma"][:, param_int]))   # ProbCNN architecture only
                        print("Delta_d: " + str(preds_dict["delta_d"][:, param_int]))
                        print("Delta_d_hat: " + str(preds_dict["delta_d_hat"][:, param_int]))
                        #print("Delta_d_hat: " + str(preds_dict["delta_d_hat"][:, i+1]))
                        print("Difference sine: " + str(delta_d_diff_sines[:, param_int]))
                        #print("Difference sine: " + str(delta_d_diff_sines[:, i]))
                        #print("Delta_d_hat loss: " + str(preds_dict["delta_d_hat_mse"]))
                        #print("Difference sine (direct): " + str(np.sin(preds_dict["delta_d"] - preds_dict["delta_d_hat"])[:, param_int]))
                        #print("Difference sine (from normals): " + str(preds_dict["diff_angles"]))
                        print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")

                    #print("Predictions for first example: " + str(preds_dict["delta_d_hat"][0]))
                    if "rot3d_pose" in preds_dict.keys():
                        #print("rot3d_pose trace: " + str(np.trace(preds_dict["rot3d_pose"][0], axis1=1, axis2=2)))
                        #print("rot3d_delta_d_pose trace: " + str(np.trace(preds_dict["rot3d_delta_d_pose"][0], axis1=1, axis2=2)))
                        #print("rot3d_pose: " + str(preds_dict["rot3d_pose"][0]))   # RotConv1d architecture only
                        #print("rot3d_delta_d_pose: " + str(preds_dict["rot3d_delta_d_pose"][0]))   # RotConv1d architecture only
                        print("rot3d_pose error: " + str(preds_dict["rot3d_pose"][0] - preds_dict["rot3d_delta_d_pose"][0]))   # RotConv1d architecture only
                        #pass
                    if "mapped_pose" in preds_dict.keys():
                        #print("mapped_pose: " + str(preds_dict["mapped_pose"][0]))   # RotConv1d architecture only
                        #print("mapped_delta_d_pose: " + str(preds_dict["mapped_delta_d_pose"][0]))   # RotConv1d architecture only
                        print("mapped_pose error: " + str(preds_dict["mapped_pose"][0] - preds_dict["mapped_delta_d_pose"][0]))   # RotConv1d architecture only
                        pass
                    if "rodrigues_delta_d_pose" in preds_dict.keys():
                        #print("rodrigues pose error: " + str(preds_dict["rodrigues_delta_d_pose"][0] - preds_dict["delta_d_pose_vec"][0]))
                        pass

                    avg_diff_sines = np.mean(trainable_diff_sines, axis=0)

                    if epoch == -1:
                        # print parameters to file
                        gt_example_parameters = data[1]
                        pred_example_parameters = preds_dict["learned_params"]
                        diff_example_parameters = gt_example_parameters - pred_example_parameters
                        param_save_dir = self.pred_path + "/example_parameters/"
                        os.makedirs(param_save_dir, exist_ok=True)

                        np.savetxt(param_save_dir + "gt_params.txt", gt_example_parameters)
                        np.savetxt(param_save_dir + "pred_params.txt", pred_example_parameters)
                        np.savetxt(param_save_dir + "diff.txt", diff_example_parameters)

                    # Track resets
                    BLOCK_SIZE = self.data_samples / self.RESET_PERIOD
                    #print("BLOCK_SIZE " + str(BLOCK_SIZE))
                    BLOCKS = self.examples // BLOCK_SIZE
                    #print("BLOCKS " + str(BLOCKS))
                    #if (epoch - 1) < 0 or self.testing:
                    if epoch < 0 or self.testing:
                        was_reset = [False for _ in BLOCKS]
                    else:
                        #INDEX = (epoch - 1) % self.RESET_PERIOD
                        INDEX = epoch % self.RESET_PERIOD
                        #print("INDEX " + str(INDEX))
                        was_reset = [entry == INDEX for entry in BLOCKS]
                    #print("was_reset " + str(was_reset))
                    #exit(1)

                    silh_comp_list = []
                    for i, learned_pc in enumerate(preds[2], 1):
                        # Store the learned mesh
                        print_mesh(os.path.join(self.pred_path, "{}_epoch.{:05d}.pred_pc_{:03d}.obj".format(data_type, epoch + 1, i)), learned_pc, self.smpl.faces)

                        pred_silhouette = Mesh(pointcloud=learned_pc).render_silhouette(show=False)
                        #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.pred_silh_{:03d}.png".format(data_type, epoch + 1, i)), pred_silhouette)
                        gt_silhouette = Mesh(pointcloud=data_dict["gt_pc"][i-1]).render_silhouette(show=False)

                        # Store predicted silhouette and the difference between it and the GT silhouette
                        #gt_silhouette = (self.gt_silhouettes[data_type][i-1] * 255).astype("uint8")
                        #gt_silhouette = self.gt_silhouettes[data_type][i-1].astype("uint8")
                        #print("gt_silhouette shape: " + str(gt_silhouette.shape))
                        #gt_silhouette = gt_silhouette.reshape((gt_silhouette.shape[0], gt_silhouette.shape[1]))
                        #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.gt_silh_{:03d}.png".format(data_type, epoch + 1, i)), gt_silhouette)

                        diff_silh = (gt_silhouette != pred_silhouette)*255
                        #diff_silh = abs(gt_silhouette - pred_silhouette)
                        #print(diff_silh.shape)
                        #cv2.imshow("Diff silh", diff_silh)
                        #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.diff_silh_{:03d}.png".format(data_type, epoch + 1, i)), diff_silh.astype("uint8"))
                        silh_comp = np.concatenate([gt_silhouette, pred_silhouette, diff_silh], axis=1)
                        #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:05d}.silh_comp_{:03d}.png".format(data_type, epoch + 1, i)), silh_comp.astype("uint8"))

                        if was_reset[i-1]:
                            # Grey the image (integer halving keeps the dtype valid)
                            silh_comp = silh_comp // 2

                        # Convert to rgb and write the difference sine to the image
                        silh_comp_rgb = np.zeros((silh_comp.shape[0], silh_comp.shape[1], 3))
                        for c in range(3):
                            silh_comp_rgb[:, :, c] = silh_comp

                        # Write to the image
                        font                   = cv2.FONT_HERSHEY_SIMPLEX
                        ang_mae                = (550,30)
                        normals_loss           = (550,240)
                        gt_main_rot            = (0, 70)
                        pred_main_rot          = (0, 50)
                        delta_d_hat_pos        = (0, 90)
                        fontScale              = 0.6
                        fontColor              = (0,0,255)
                        lineType               = 2
                        cv2.putText(silh_comp_rgb, "Ang. MAE: {0:.3f}".format(avg_diff_sines[i-1]),
                                ang_mae,
                                font,
                                fontScale,
                                fontColor,
                                lineType)

                        cv2.putText(silh_comp_rgb, "Norm. Loss: {0:.3f}".format(np.mean(preds_dict["diff_angle_mse"][i-1])),
                                normals_loss,
                                font,
                                fontScale,
                                fontColor,
                                lineType)

                        cv2.putText(silh_comp_rgb, "Main rot.: " +str(preds_dict["learned_params"][i-1, 0:3]),
                                pred_main_rot,
                                font,
                                fontScale,
                                fontColor,
                                lineType)

                        cv2.putText(silh_comp_rgb, "GT Main rot.: " +str(data[1][i-1, 0:3]),
                                gt_main_rot,
                                font,
                                fontScale,
                                fontColor,
                                lineType)

                        cv2.putText(silh_comp_rgb, "delta_d_hat: " +str(preds_dict["delta_d_hat"][i-1, 0:3]),
                                delta_d_hat_pos,
                                font,
                                fontScale,
                                fontColor,
                                lineType)
                        # Add image to list
                        silh_comp_list.append(silh_comp_rgb)

                        # Save the predicted point cloud relative to the GT point cloud
                        print_mesh(os.path.join(self.pred_path, "{}_epoch.{:05d}.gt_pc_{:03d}.obj".format(data_type, epoch + 1, i)), data[2][i-1], self.smpl.faces)
                        print_point_clouds(os.path.join(self.pred_path, "{}_epoch.{:05d}.comparison_{:03d}.obj".format(data_type, epoch + 1, i)), [learned_pc, data[2][i-1]], [(255,0,0),(0,255,0)])

                    if len(silh_comp_list) > 0:
                        silh_comps_rgb = np.concatenate(silh_comp_list, axis=0)

                        font                   = cv2.FONT_HERSHEY_SIMPLEX
                        topLeftCorner          = (30,30)
                        fontScale              = 1
                        fontColor              = (0,0,255)
                        lineType               = 2

                        if self.testing:
                            text = "Iteration "
                        else:
                            text = "Epoch "

                        cv2.putText(silh_comps_rgb, text + str(epoch + 1),
                                    topLeftCorner,
                                    font,
                                    fontScale,
                                    fontColor,
                                    lineType)
                        cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:05d}.silh_comps.png".format(data_type, epoch + 1)), silh_comps_rgb.astype("uint8"))
Example #12
    def on_epoch_end(self, epoch, logs=None):
        """ Store the model loss and accuracy at the end of every epoch, and store a model prediction on data """
        if logs is not None:
            self.epoch_log.write(
                json.dumps({
                    'epoch': epoch,
                    'loss': logs['loss']
                }) + '\n')
            self.param_log.write(
                json.dumps({
                    'epoch': epoch,
                    'delta_d_mse_loss': logs['delta_d_mse_loss']
                }) + '\n')
            self.mesh_log.write(
                json.dumps(
                    {
                        'epoch': epoch,
                        'pc_mean_euc_dist_loss': logs['pc_mean_euc_dist_loss']
                    }) + '\n')

        epoch = int(epoch)
        if (epoch + 1) % self.period == 0 or epoch == 0:
            # Predict on all of the given input parameters
            for data_type, data in self.input_data.items():
                if data[0][0] is not None:
                    print("Saving to directory '{}'".format(self.pred_path))
                    # Predict on these input parameters
                    #print("data value: " + str(data))
                    data_dict = {
                        "embedding_index": np.array(data[0]),
                        "gt_params": np.array(data[1]),
                        "gt_pc": np.array(data[2])
                    }

                    #print("embedding index size: " + str(data_dict["embedding_index"].shape))
                    #print("gt_params size: " + str(data_dict["gt_params"].shape))
                    #print("gt_pc size: " + str(data_dict["gt_pc"].shape))
                    #exit(1)
                    preds = self.model.predict(
                        data_dict)  #, batch_size=len(data[0]))

                    print(str(data_type))
                    print("------------------------------------")
                    #print("GT SMPL: " + str(data[1][:,0:3]))
                    #print("Parameters: " + str(preds[0][:,0:3]))
                    #print("Delta_d: " + str(preds[6][:,0:3]))
                    #print("Delta_d_hat: " + str(preds[7][:,0:3]))

                    #                    print("epoch: " + str(epoch + 1))
                    #                    exit(1)
                    self.delta_d_log.write('epoch {:05d}\n'.format(epoch + 1))
                    for parameter in self.trainable_params:
                        param_int = int(parameter[6:])
                        print("Parameter: " + str(parameter))
                        print("GT SMPL: " + str(data[1][:, param_int]))
                        print("Parameters: " + str(preds[0][:, param_int]))
                        print("Delta_d: " + str(preds[6][:, param_int]))
                        print("Delta_d_hat: " + str(preds[7][:, param_int]))
                        print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")

                        self.delta_d_log.write('parameter: ' + str(parameter) +
                                               '\n' + 'Delta_d: ' +
                                               str(preds[6][:, param_int]) +
                                               '\n')

                    for i, learned_pc in enumerate(preds[2], 1):
                        # Store the learned mesh
                        print_mesh(
                            os.path.join(
                                self.pred_path,
                                "{}_epoch.{:05d}.pred_pc_{:03d}.obj".format(
                                    data_type, epoch + 1, i)), learned_pc,
                            self.smpl.faces)

                        pred_silhouette = Mesh(
                            pointcloud=learned_pc).render_silhouette(
                                show=False)
                        #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.pred_silh_{:03d}.png".format(data_type, epoch + 1, i)), pred_silhouette)

                        if self.gt_silhouettes[data_type] is not None:
                            # Store predicted silhouette and the difference between it and the GT silhouette
                            #gt_silhouette = (self.gt_silhouettes[data_type][i-1] * 255).astype("uint8")
                            gt_silhouette = self.gt_silhouettes[data_type][
                                i - 1].astype("uint8")
                            #print("gt_silhouette shape: " + str(gt_silhouette.shape))
                            gt_silhouette = gt_silhouette.reshape(
                                (gt_silhouette.shape[0],
                                 gt_silhouette.shape[1]))
                            #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.gt_silh_{:03d}.png".format(data_type, epoch + 1, i)), gt_silhouette)

                            diff_silh = abs(gt_silhouette - pred_silhouette)
                            #print(diff_silh.shape)
                            #cv2.imshow("Diff silh", diff_silh)
                            #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.diff_silh_{:03d}.png".format(data_type, epoch + 1, i)), diff_silh.astype("uint8"))
                            silh_comp = np.concatenate(
                                [gt_silhouette, pred_silhouette, diff_silh],
                                axis=1)
                            cv2.imwrite(
                                os.path.join(
                                    self.pred_path,
                                    "{}_epoch.{:05d}.silh_comp_{:03d}.png".
                                    format(data_type, epoch + 1, i)),
                                silh_comp.astype("uint8"))

                        # Save the predicted point cloud relative to the GT point cloud
                        print_mesh(
                            os.path.join(
                                self.pred_path,
                                "{}_epoch.{:05d}.gt_pc_{:03d}.obj".format(
                                    data_type, epoch + 1, i)), data[2][i - 1],
                            self.smpl.faces)
                        print_point_clouds(
                            os.path.join(
                                self.pred_path,
                                "{}_epoch.{:05d}.comparison_{:03d}.obj".format(
                                    data_type, epoch + 1, i)),
                            [learned_pc, data[2][i - 1]], [(255, 0, 0),
                                                           (0, 255, 0)])
    dilate = 1
    if dilate == 1:
        morph_mask = np.array([[0.34, 0.34, 0.34], [0.34, 1.00, 0.34],
                               [0.34, 0.34, 0.34]])
        new_img = binary_closing(shifted_img != 0,
                                 structure=morph_mask,
                                 iterations=1).astype(np.uint8)
        new_img *= 255
    else:
        new_img = shifted_img

    return new_img
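
For reference, a self-contained sketch of the closing operation used above; the scipy import location is the standard one and the toy silhouette is illustrative.

import numpy as np
from scipy.ndimage import binary_closing

# Toy silhouette with a one-pixel hole; closing with a 3x3 structuring element fills it
silh = np.full((5, 5), 255, dtype=np.uint8)
silh[2, 2] = 0

morph_mask = np.array([[0.34, 0.34, 0.34],
                       [0.34, 1.00, 0.34],
                       [0.34, 0.34, 0.34]])
closed = binary_closing(silh != 0, structure=morph_mask,
                        iterations=1).astype(np.uint8) * 255
print(closed[2, 2])  # 255: the hole has been filled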


if __name__ == "__main__":
    mesh_dir = "/data/cvfs/hjhb2/projects/deep_optimiser/example_meshes/"
    obj_paths = os.listdir(mesh_dir)
    for obj_path in obj_paths:
        mesh = Mesh(os.path.join(mesh_dir, obj_path))
        silh = mesh.render_silhouette(dim=[256, 256], show=True)
        normalised_silh = normalise_img(silh, dim=(128, 128))

        #plt.imshow(silh_cropped, cmap="gray")
        plt.imshow(normalised_silh, cmap="gray")
        plt.show()

        augmented_silh = augment_image(normalised_silh)

        plt.imshow(augmented_silh, cmap="gray")
        plt.show()
Example #14
def get_pc_and_silh(path):
    mesh = Mesh(filepath=path)
    pc = mesh.render_silhouette_with_closing(show=False, closing=False)
    silh = mesh.render_silhouette_with_closing(show=False, closing=True)

    return pc, silh
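
An illustrative use of get_pc_and_silh to compare the raw and closed silhouettes side by side; the mesh path and output file name are placeholders (note that the first return value is the render without closing, despite its name).

import numpy as np
import cv2

raw_silh, closed_silh = get_pc_and_silh("./example_meshes/sample.obj")
comparison = np.concatenate([raw_silh, closed_silh], axis=1)
cv2.imwrite("closing_comparison.png", comparison.astype("uint8"))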
#eval_string = ""
#for i in range(len(eval_log)):
#    eval_string += str(optlearner_model.metrics_names[i]) + ": " + str(eval_log[i]) + "  "
#print(eval_string)

# Predict the parameters from the silhouette and generate a mesh
prediction = optlearner_model.predict(test_sample_input)

for i, pred in enumerate(prediction, 1):
    learned_pc = pred[2]

    gt_silhouette = test_sample_silh[i-1]
    pred_silhouette = Mesh(pointcloud=learned_pc).render_silhouette(show=False)

    diff_silh = abs(gt_silhouette - pred_silhouette)
    #print(diff_silh.shape)
    #cv2.imshow("Diff silh", diff_silh)
    #cv2.imwrite(os.path.join(self.pred_path, "{}_epoch.{:03d}.diff_silh_{:03d}.png".format(data_type, epoch + 1, i)), diff_silh.astype("uint8"))
    silh_comp = np.concatenate([gt_silhouette, pred_silhouette, diff_silh])
    cv2.imwrite(os.path.join(test_vis_dir, "{}_epoch.{:05d}.silh_comp_{:03d}.png".format(data_type, epoch + 1, i)), silh_comp.astype("uint8"))


#pred_params = prediction[0]
#real_params = Y_test[0]
#
#for pred in pred_params:
#    pointcloud = smpl.set_params(pred[10:82], pred[0:10], pred[82:85])
#    pred_silhouette = Mesh(pointcloud=pointcloud).render_silhouette()