# NOTE (reconstructed, not in the original excerpt): imports assumed by the
# snippets below, inferred from the calls they make (Keras 2.x on the
# TensorFlow 1.x backend). Project-local helpers referenced but not defined
# here: load_params, get_parameters, Points3DFromSMPLParams, render_orth,
# get_mesh_normals, get_sin_metric, init_emb_layers, scaled_tanh.
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.initializers import RandomUniform
from keras.layers import (Input, Embedding, Reshape, Lambda, Dense, Dropout,
                          Flatten, Concatenate, BatchNormalization, Conv2D,
                          MaxPooling2D, AveragePooling2D)
from keras.applications.resnet50 import ResNet50


def OptLearnerDistArchitecture(parameter_initializer=RandomUniform(minval=-0.2, maxval=0.2, seed=10)):
    """ Optimised learner network architecture """
    # An embedding layer is required to optimise the parameters
    optlearner_input = Input(shape=(1,), name="embedding_index")
    optlearner_params = Embedding(1000, 85, embeddings_initializer=parameter_initializer, name="parameter_embedding")(optlearner_input)
    optlearner_params = Reshape(target_shape=(85,), name="learned_params")(optlearner_params)
    print("optlearner parameters shape: " + str(optlearner_params.shape))

    # Ground truth parameters and point cloud are inputs to the model as well
    gt_params = Input(shape=(85,), name="gt_params")
    gt_pc = Input(shape=(6890, 3), name="gt_pc")
    print("gt parameters shape: " + str(gt_params.shape))
    print("gt point cloud shape: " + str(gt_pc.shape))

    # Compute the true offset (i.e. difference) between the ground truth and learned parameters
    delta_d = Lambda(lambda x: tf.subtract(x[0], x[1]), name="delta_d")([gt_params, optlearner_params])
    print("delta_d shape: " + str(delta_d.shape))

    # Calculate the (batched) MSE between the learned parameters and the ground truth parameters
    false_loss_delta_d = Lambda(lambda x: tf.reduce_mean(tf.square(x), axis=1))(delta_d)
    false_loss_delta_d = Reshape(target_shape=(1,), name="delta_d_mse")(false_loss_delta_d)
    print("delta_d loss shape: " + str(false_loss_delta_d.shape))

    # Load SMPL model and get necessary parameters
    smpl_params = load_params('./keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    _, _, input_info = get_parameters()
    input_betas = Lambda(lambda x: x[:, 72:82])(optlearner_params)
    input_pose_rodrigues = Lambda(lambda x: x[:, 0:72])(optlearner_params)
    input_trans = Lambda(lambda x: x[:, 82:85])(optlearner_params)

    # Get the point cloud corresponding to these parameters
    optlearner_pc = Points3DFromSMPLParams(input_betas, input_pose_rodrigues, input_trans, smpl_params, input_info)
    print("optlearner point cloud shape: " + str(optlearner_pc.shape))

    # Get the (batched) Euclidean loss between the learned and ground truth point clouds
    pc_euclidean_dist = Lambda(lambda x: tf.sqrt(tf.reduce_sum(tf.squared_difference(x[0], x[1]), axis=2)))([gt_pc, optlearner_pc])
    false_loss_pc = Lambda(lambda x: tf.reduce_mean(x, axis=1))(pc_euclidean_dist)
    false_loss_pc = Reshape(target_shape=(1,), name="pc_mean_euc_dist")(false_loss_pc)
    print("point cloud loss shape: " + str(false_loss_pc.shape))

    # Learn the offset in parameters from the difference between the ground truth and learned point clouds
    optlearner_architecture = Dense(512, activation="relu")(pc_euclidean_dist)
    optlearner_architecture = Dropout(0.5)(optlearner_architecture)
    optlearner_architecture = Dense(1024, activation="relu")(optlearner_architecture)
    optlearner_architecture = Dropout(0.5)(optlearner_architecture)
    delta_d_hat = Dense(85, activation="tanh")(optlearner_architecture)

    # Calculate the (batched) MSE between the learned and ground truth offset in the parameters
    false_loss_delta_d_hat = Lambda(lambda x: tf.reduce_mean(tf.square(tf.subtract(x[0], x[1])), axis=1))([delta_d, delta_d_hat])
    false_loss_delta_d_hat = Reshape(target_shape=(1,), name="delta_d_hat_mse")(false_loss_delta_d_hat)
    print("delta_d_hat loss shape: " + str(false_loss_delta_d_hat.shape))

    # Prevent model from using the delta_d_hat gradient in final loss
    delta_d_hat_NOGRAD = Lambda(lambda x: K.stop_gradient(x), name='optlearner_output_NOGRAD')(delta_d_hat)

    # False loss designed to pass the learned offset as a gradient to the embedding layer
    false_loss_smpl = Lambda(lambda x: tf.multiply(x[0], x[1]), name="smpl_diff")([optlearner_params, delta_d_hat_NOGRAD])
    print("smpl loss shape: " + str(false_loss_smpl.shape))

    return [optlearner_input, gt_params, gt_pc], [optlearner_params, false_loss_delta_d, optlearner_pc, false_loss_pc, false_loss_delta_d_hat, false_loss_smpl]
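# Hedged usage sketch (not part of the original source): one plausible way to
# turn the architecture above into a trainable keras Model, assuming the SMPL
# pickle is available at the hard-coded path. The false_loss_* outputs are
# already per-sample loss values, so the usual trick is to fit them against
# dummy zero targets with an identity-style loss; the optimizer and loss
# weights below are illustrative assumptions.
def _demo_build_optlearner_dist_model():
    from keras.models import Model
    from keras.optimizers import Adam

    inputs, outputs = OptLearnerDistArchitecture()
    model = Model(inputs=inputs, outputs=outputs)

    # Identity-style loss: the model output itself is the quantity to
    # minimise, so the (dummy, all-zero) targets are ignored.
    def false_loss(y_true, y_pred):
        return K.mean(y_pred)

    # Diagnostic outputs (parameters, point cloud) get weight 0; only the
    # "false" losses drive the gradients.
    model.compile(optimizer=Adam(lr=1e-3),
                  loss=[false_loss] * len(outputs),
                  loss_weights=[0.0, 1.0, 0.0, 1.0, 1.0, 1.0])
    return model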
def SimpleArchitecture(input_shape):
    """ Basic model for predicting 3D human pose and shape from an input silhouette """
    # The segmented silhouette is the model input
    input_silh = Input(shape=input_shape, name="input_silh")

    resnet_model = ResNet50(include_top=False,
                            input_tensor=input_silh,
                            input_shape=input_shape,
                            weights=None)
    encoder_architecture = Flatten()(resnet_model.outputs[0])
    encoder_params = Dense(85, activation="tanh")(encoder_architecture)

    # Load SMPL model and get necessary parameters
    smpl_params = load_params(
        './keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
    )
    _, _, input_info = get_parameters()
    input_betas = Lambda(lambda x: x[:, 72:82])(encoder_params)
    input_pose_rodrigues = Lambda(lambda x: x[:, 0:72])(encoder_params)
    input_trans = Lambda(lambda x: x[:, 82:85])(encoder_params)

    encoder_mesh = Points3DFromSMPLParams(input_betas, input_pose_rodrigues,
                                          input_trans, smpl_params, input_info)

    # Render the silhouette orthographically
    decoder_silh = render_orth(encoder_mesh)

    return [input_silh], [encoder_params, encoder_mesh, decoder_silh]
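# Hedged sketch (an assumption, not the project's render_orth): the
# orthographic silhouette renderer used above is external to this excerpt. The
# numpy function below illustrates the idea offline: drop the depth axis, map
# x/y vertex coordinates to pixels and mark occupied cells. The real layer
# would need a differentiable renderer to sit inside the Keras graph.
def orth_silhouette_np(verts, img_size=256, margin=0.05):
    """ Rasterise an (N, 3) vertex array into a binary (img_size, img_size) mask. """
    xy = verts[:, :2]  # orthographic projection: discard z
    lo, hi = xy.min(axis=0), xy.max(axis=0)
    scale = (1.0 - 2.0 * margin) * img_size / max((hi - lo).max(), 1e-8)
    pix = ((xy - lo) * scale + margin * img_size).astype(np.int32)
    pix = np.clip(pix, 0, img_size - 1)
    silh = np.zeros((img_size, img_size), dtype=np.float32)
    silh[img_size - 1 - pix[:, 1], pix[:, 0]] = 1.0  # flip y for image coords
    return silh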
def load_smpl_params():
    # Load SMPL model and get necessary parameters
    smpl_params = load_params('./keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    _, _, input_info = get_parameters()
    faces = smpl_params['f']    # canonical mesh faces
    print("faces shape: " + str(faces.shape))
    #exit(1)
    return smpl_params, input_info, faces
def NormLearnerArchitecture(parameter_initializer=RandomUniform(minval=-0.2, maxval=0.2, seed=10)):
    """ Normal learner network architecture """
    # An embedding layer is required to optimise the parameters
    normlearner_input = Input(shape=(1,), name="embedding_index")
    normlearner_params = Embedding(1000, 85, embeddings_initializer=parameter_initializer)(normlearner_input)
    normlearner_params = Reshape(target_shape=(85,), name="learned_params")(normlearner_params)
    print("normlearner parameters shape: " + str(normlearner_params.shape))

    # Ground truth parameters and point cloud are inputs to the model as well
    gt_params = Input(shape=(85,), name="gt_params")
    gt_pc = Input(shape=(6890, 3), name="gt_pc")
    print("gt parameters shape: " + str(gt_params.shape))
    print("gt point cloud shape: " + str(gt_pc.shape))

    # Compute the true offset (i.e. difference) between the ground truth and learned parameters
    delta_d = Lambda(lambda x: tf.subtract(x[0], x[1]), name="delta_d")([gt_params, normlearner_params])
    print("delta_d shape: " + str(delta_d.shape))

    # Calculate the (batched) MSE between the learned parameters and the ground truth parameters
    false_loss_delta_d = Lambda(lambda x: tf.reduce_mean(tf.square(x), axis=1))(delta_d)
    false_loss_delta_d = Reshape(target_shape=(1,), name="delta_d_mse")(false_loss_delta_d)
    print("delta_d loss shape: " + str(false_loss_delta_d.shape))

    # Load SMPL model and get necessary parameters
    smpl_params = load_params('./keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    _, _, input_info = get_parameters()
    input_betas = Lambda(lambda x: x[:, 72:82])(normlearner_params)
    input_pose_rodrigues = Lambda(lambda x: x[:, 0:72])(normlearner_params)
    input_trans = Lambda(lambda x: x[:, 82:85])(normlearner_params)

    # Get the point cloud corresponding to these parameters
    normlearner_pc = Points3DFromSMPLParams(input_betas, input_pose_rodrigues, input_trans, smpl_params, input_info)
    print("normlearner point cloud shape: " + str(normlearner_pc.shape))

    # Get the (batched) MSE between the learned and ground truth point clouds
    false_loss_pc = Lambda(lambda x: tf.reduce_mean(tf.square(tf.subtract(x[0], x[1])), axis=[1,2]))([gt_pc, normlearner_pc])
    false_loss_pc = Reshape(target_shape=(1,), name="pointcloud_mse")(false_loss_pc)
    print("point cloud loss shape: " + str(false_loss_pc.shape))

    # NOTE: a regression network predicting the parameter offset delta_d_hat
    # from the two point clouds (as in the OptLearner architectures) is
    # disabled in this variant, so it is not among the returned outputs.

    return [normlearner_input, gt_params, gt_pc], [normlearner_params, false_loss_delta_d, normlearner_pc, false_loss_pc]
class SMPL(object):
    """ SMPL body model. (The enclosing class statement is reconstructed here;
    only __init__ appears in this excerpt.) """

    def __init__(self, model_path):
        """
        SMPL model.

        Parameters
        ----------
        model_path : str
            Path to the SMPL model parameters, pre-processed by `preprocess.py`.
        """
        # (Direct pickle loading is disabled; load_params handles deserialisation.)
        params = load_params(model_path)

        self.J_regressor = params['J_regressor']
        self.weights = params['weights']
        self.posedirs = params['posedirs']
        self.v_template = params['v_template']
        self.shapedirs = params['shapedirs']
        self.faces = params['f']
        self.kintree_table = params['kintree_table']

        id_to_col = {
            self.kintree_table[1, i]: i
            for i in range(self.kintree_table.shape[1])
        }
        self.parent = {
            i: id_to_col[self.kintree_table[0, i]]
            for i in range(1, self.kintree_table.shape[1])
        }

        self.pose_shape = [24, 3]
        self.beta_shape = [10]
        self.trans_shape = [3]

        self.pose = np.zeros(self.pose_shape)
        self.beta = np.zeros(self.beta_shape)
        self.trans = np.zeros(self.trans_shape)

        self.verts = None
        self.J = None
        self.R = None

        self.update()  # defined on the full class; not included in this excerpt
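# Worked example (illustrative, not from the source) of the kintree decoding in
# SMPL.__init__ above: row 1 of kintree_table holds each joint's id, row 0 the
# id of its parent; the root's "parent" is the uint32 sentinel 4294967295.
def _demo_kintree_parents():
    kintree_table = np.array([[4294967295, 0, 0, 1],   # parent joint ids
                              [0,          1, 2, 3]])  # joint ids
    id_to_col = {kintree_table[1, i]: i for i in range(kintree_table.shape[1])}
    parent = {i: id_to_col[kintree_table[0, i]]
              for i in range(1, kintree_table.shape[1])}
    assert parent == {1: 0, 2: 0, 3: 1}
    return parent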
def EncoderArchitecture(input_shape):
    """ Specify the encoder's network architecture """
    encoder_inputs = Input(shape=input_shape)

    resnet_model = ResNet50(include_top=False,
                            input_tensor=encoder_inputs,
                            input_shape=input_shape,
                            weights=None)
    encoder_architecture = Flatten()(resnet_model.outputs[0])
    encoder_params = Dense(85, activation="tanh")(encoder_architecture)

    # Load SMPL model and get necessary parameters
    smpl_params = load_params(
        './keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
    )
    _, _, input_info = get_parameters()
    input_betas = Lambda(lambda x: x[:, 72:82])(encoder_params)
    input_pose_rodrigues = Lambda(lambda x: x[:, 0:72])(encoder_params)
    input_trans = Lambda(lambda x: x[:, 82:85])(encoder_params)

    encoder_mesh = Points3DFromSMPLParams(input_betas, input_pose_rodrigues,
                                          input_trans, smpl_params, input_info)

    return [encoder_inputs], [encoder_params, encoder_mesh]
def OptLearnerStaticCosArchitecture(param_trainable, init_wrapper, emb_size=1000):
    """ Optimised learner network architecture """
    # An embedding layer is required to optimise the parameters
    #print('parameter initializer pose '+str(parameter_initializer([1000,85])[0,:15]))
    #print('parameter initializer shape '+str(parameter_initializer([1000,85])[0,72:82]))
    #print('parameter initializer T '+str(parameter_initializer([1000,85])[0,82:85]))
    #exit(1)

    optlearner_input = Input(shape=(1,), name="embedding_index")
    def init_emb_layers(index, param_trainable, init_wrapper):
        """ Initialise the parameter embedding layers """
        emb_layers = []
        num_params = 85
        for i in range(num_params):
            layer_name = "param_{:02d}".format(i)
            initialiser = init_wrapper(param=i, offset=param_trainable[layer_name])

            emb_layer = Embedding(emb_size, 1, name=layer_name, trainable=param_trainable[layer_name], embeddings_initializer=initialiser)(index)
            emb_layers.append(emb_layer)

        return emb_layers

    # Initialise the embedding layers
    emb_layers = init_emb_layers(optlearner_input, param_trainable, init_wrapper)
    optlearner_params = Concatenate(name="parameter_embedding")(emb_layers)

    #optlearner_params = Embedding(1000, 85, embeddings_initializer=parameter_initializer, name="parameter_embedding")(optlearner_input)
    #print("optlearner parameters shape: " +str(optlearner_params.shape))
    optlearner_params = Reshape(target_shape=(85,), name="learned_params")(optlearner_params)
    print("optlearner parameters shape: " + str(optlearner_params.shape))
    #exit(1)

    # Ground truth parameters and point cloud are inputs to the model as well
    gt_params = Input(shape=(85,), name="gt_params")
    gt_pc = Input(shape=(6890, 3), name="gt_pc")
    print("gt parameters shape: " + str(gt_params.shape))
    print("gt point cloud shape: " + str(gt_pc.shape))

    # Compute the true offset (i.e. difference) between the ground truth and learned parameters
    delta_d = Lambda(lambda x: x[0] - x[1], name="delta_d")([gt_params, optlearner_params])
    print("delta_d shape: " + str(delta_d.shape))
    #exit(1)
    # Calculate the (batched) MSE between the learned parameters and the ground truth parameters

    false_loss_delta_d = Lambda(lambda x: K.mean(K.square(x), axis=1))(delta_d)
    print("delta_d loss shape: " + str(false_loss_delta_d.shape))
    #exit(1)
    false_loss_delta_d = Reshape(target_shape=(1,), name="delta_d_mse")(false_loss_delta_d)
    print("delta_d loss shape: " + str(false_loss_delta_d.shape))

    # Load SMPL model and get necessary parameters
    smpl_params = load_params('./keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    _, _, input_info = get_parameters()

    input_betas = Lambda(lambda x: x[:, 72:82])(optlearner_params)
    input_pose_rodrigues = Lambda(lambda x: x[:, 0:72])(optlearner_params)
    input_trans = Lambda(lambda x: x[:, 82:85])(optlearner_params)

    # Get the point cloud corresponding to these parameters
    optlearner_pc = Points3DFromSMPLParams(input_betas, input_pose_rodrigues, input_trans, smpl_params, input_info)
    print("optlearner point cloud shape: " + str(optlearner_pc.shape))
    #optlearner_pc = Lambda(lambda x: x * 0.0)[optlearner_pc]

    #exit(1)
#    # Get the (batched) MSE between the learned and ground truth point clouds
#    false_loss_pc = Lambda(lambda x: tf.reduce_mean(tf.square(tf.subtract(x[0], x[1])), axis=[1,2]))([gt_pc, optlearner_pc])
#    false_loss_pc = Reshape(target_shape=(1,), name="pointcloud_mse")(false_loss_pc)
#    print("point cloud loss shape: " + str(false_loss_pc.shape))
    # Get the (batched) squared Euclidean distance between the learned and ground truth point clouds
    pc_euclidean_diff = Lambda(lambda x: x[0] - x[1])([gt_pc, optlearner_pc])
    pc_euclidean_dist = Lambda(lambda x: K.sum(K.square(x), axis=-1))(pc_euclidean_diff)
    print('pc euclidean dist ' + str(pc_euclidean_dist.shape))
    #exit(1)
    false_loss_pc = Lambda(lambda x: K.mean(x, axis=1))(pc_euclidean_dist)
    false_loss_pc = Reshape(target_shape=(1,), name="pc_mean_euc_dist")(false_loss_pc)
    print("point cloud loss shape: " + str(false_loss_pc.shape))

    # Learn the offset in parameters from the difference between the ground truth and learned point clouds
    #flattened_gt_pc = Flatten()(gt_pc)
    #flattened_optlearner_pc = Flatten()(optlearner_pc)
    #concat_pc_inputs = Concatenate()([flattened_gt_pc, flattened_optlearner_pc])
    pc_euclidean_diff_NOGRAD =  Lambda(lambda x: K.stop_gradient(x))(pc_euclidean_diff) # This is added to avoid influencing embedding layer parameters by a "bad" gradient network
    print("shape of output: " + str(pc_euclidean_diff_NOGRAD.shape))
    index_list = [1850, 1600, 2050, 5350, 5050, 5500]
    item_list = []
    for idx in index_list:
        # Bind idx at definition time (idx=idx). The Lambda is invoked
        # immediately during graph construction, but default-argument binding
        # avoids the late-binding closure bug if the layer is ever re-called
        # or deserialised. (Also fixes the mixed tab/space indentation here.)
        item = Lambda(lambda x, idx=idx: x[:, idx:(idx + 1), :])(pc_euclidean_diff_NOGRAD)
        item_list.append(item)
    pc_euclidean_diff_NOGRAD = Concatenate(axis=-2)(item_list)
    #pc_euclidean_diff_NOGRAD = Lambda(lambda x: x[:, [1850, 1600, 2050, 5350, 5050, 5500], :])(pc_euclidean_diff_NOGRAD)
    print("shape of output: " + str(pc_euclidean_diff_NOGRAD.shape))
    #exit(1)
    pc_euclidean_diff_NOGRAD = Flatten()(pc_euclidean_diff_NOGRAD)
    # (Disabled experiments: restricting the visible points to arm regions,
    # e.g. x[:, 5400:5600] and x[:, 1850:2050], to make the learning task easier.)

    #optlearner_architecture = Dense(2**12, activation="relu")(pc_euclidean_diff_NOGRAD)
    optlearner_architecture = Dense(2**11, activation="relu")(pc_euclidean_diff_NOGRAD)
    #optlearner_architecture = Dense(2**10, activation="relu")(pc_euclidean_diff_NOGRAD)
    print('optlearner_architecture '+str(optlearner_architecture.shape))
    #exit(1)
    #optlearner_architecture = Dropout(0.1)(optlearner_architecture)
    #optlearner_architecture = Dense(1024, activation="relu")(optlearner_architecture)
    #optlearner_architecture = Dropout(0.1)(optlearner_architecture)
    #delta_d_hat = Dense(85, activation="linear", name="delta_d_hat")(optlearner_architecture)
    delta_d_hat = Dense(85, activation=scaled_tanh, name="delta_d_hat")(optlearner_architecture)
    print('delta_d_hat shape '+str(delta_d_hat.shape))
    #exit(1)

    # Calculate the (batched) MSE between the learned and ground truth offset in the parameters
    #false_loss_delta_d_hat = Lambda(lambda x: K.mean(K.square(x[0] - x[1]), axis=1))([delta_d, delta_d_hat])
    delta_d_NOGRAD = Lambda(lambda x: K.stop_gradient(x))(delta_d)
    #false_loss_delta_d_hat = Lambda(lambda x: K.sum(K.square(x[0] - x[1]), axis=1))([delta_d_NOGRAD, delta_d_hat])
    #false_loss_delta_d_hat = Lambda(lambda x: K.sum(K.square(1 - tf.math.cos(x[0] - x[1])), axis=1))([delta_d_NOGRAD, delta_d_hat])
    #false_loss_delta_d_hat = Lambda(lambda x: K.sum(K.square(tf.math.sin(0.5*(x[0] - x[1]))), axis=1))([delta_d_NOGRAD, delta_d_hat])
    false_loss_delta_d_hat = Lambda(lambda x: K.sum(K.square((x[0] - x[1])*tf.math.sin(0.5*(x[0] - x[1]))) + 0.1*K.abs(x[0] - x[1]), axis=1))([delta_d_NOGRAD, delta_d_hat])
    #false_loss_delta_d_hat = Lambda(lambda x: K.sum(K.square(tf.math.tanh(x[0] - x[1])), axis=1))([delta_d_NOGRAD, delta_d_hat])
    false_loss_delta_d_hat = Reshape(target_shape=(1,), name="delta_d_hat_mse")(false_loss_delta_d_hat)
    print("delta_d_hat loss shape: " + str(false_loss_delta_d_hat.shape))

    # Prevent model from using the delta_d_hat gradient in final loss
    delta_d_hat_NOGRAD = Lambda(lambda x: K.stop_gradient(x), name='optlearner_output_NOGRAD')(delta_d_hat)

    # False loss designed to pass the learned offset as a gradient to the embedding layer
    false_loss_smpl = Lambda(lambda x: x[1]*x[0], name="smpl_diff")([optlearner_params, delta_d_hat_NOGRAD])
    print("smpl loss shape: " + str(false_loss_smpl.shape))

    return [optlearner_input, gt_params, gt_pc], [optlearner_params, false_loss_delta_d, optlearner_pc, false_loss_pc, false_loss_delta_d_hat, false_loss_smpl, delta_d, delta_d_hat, delta_d_hat_NOGRAD]
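# The scaled_tanh activation used for delta_d_hat above is defined elsewhere in
# the project. One plausible reconstruction (an assumption, not the original):
# tanh stretched so the predicted offsets can span the [-pi, pi] range of the
# axis-angle pose parameters.
def scaled_tanh_sketch(x):
    return np.pi * K.tanh(x)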
def OptLearnerMeshNormalStaticArchitecture(param_trainable,
                                           init_wrapper,
                                           emb_size=1000):
    """ Optimised learner network architecture """
    # An embedding layer is required to optimise the parameters
    #print('parameter initializer pose '+str(parameter_initializer([1000,85])[0,:15]))
    #print('parameter initializer shape '+str(parameter_initializer([1000,85])[0,72:82]))
    #print('parameter initializer T '+str(parameter_initializer([1000,85])[0,82:85]))
    #exit(1)

    optlearner_input = Input(shape=(1, ), name="embedding_index")

    def init_emb_layers(index, param_trainable, init_wrapper):
        """ Initialise the parameter embedding layers """
        emb_layers = []
        num_params = 85
        for i in range(num_params):
            layer_name = "param_{:02d}".format(i)
            initialiser = init_wrapper(param=i,
                                       offset=param_trainable[layer_name])

            emb_layer = Embedding(emb_size,
                                  1,
                                  name=layer_name,
                                  trainable=param_trainable[layer_name],
                                  embeddings_initializer=initialiser)(index)
            emb_layers.append(emb_layer)

        return emb_layers

    # Initialise the embedding layers
    emb_layers = init_emb_layers(optlearner_input, param_trainable,
                                 init_wrapper)
    optlearner_params = Concatenate(name="parameter_embedding")(emb_layers)

    #optlearner_params = Embedding(1000, 85, embeddings_initializer=parameter_initializer, name="parameter_embedding")(optlearner_input)
    #print("optlearner parameters shape: " +str(optlearner_params.shape))
    optlearner_params = Reshape(target_shape=(85, ),
                                name="learned_params")(optlearner_params)
    print("optlearner parameters shape: " + str(optlearner_params.shape))
    #exit(1)

    # Ground truth parameters and point cloud are inputs to the model as well
    gt_params = Input(shape=(85, ), name="gt_params")
    gt_pc = Input(shape=(6890, 3), name="gt_pc")
    print("gt parameters shape: " + str(gt_params.shape))
    print("gt point cloud shape: " + str(gt_pc.shape))

    # Compute the true offset (i.e. difference) between the ground truth and learned parameters
    #pi = K.constant(np.pi)
    #delta_d = Lambda(lambda x: x[0] - x[1])([gt_params, optlearner_params])
    #delta_d = Lambda(lambda x: K.tf.math.floormod(x - pi, 2*pi) - pi, name="delta_d")(delta_d)  # custom modulo 2pi of delta_d
    #print("delta_d shape: " + str(delta_d.shape))
    delta_d = Lambda(lambda x: x[0] - x[1],
                     name="delta_d")([gt_params, optlearner_params])
    print("delta_d shape: " + str(delta_d.shape))
    #exit(1)
    # Calculate the (batched) MSE between the learned parameters and the ground truth parameters

    false_loss_delta_d = Lambda(lambda x: K.mean(K.square(x), axis=1))(delta_d)
    print("delta_d loss shape: " + str(false_loss_delta_d.shape))
    #exit(1)
    false_loss_delta_d = Reshape(target_shape=(1, ),
                                 name="delta_d_mse")(false_loss_delta_d)
    print("delta_d loss shape: " + str(false_loss_delta_d.shape))

    # Load SMPL model and get necessary parameters
    smpl_params = load_params(
        './keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
    )
    _, _, input_info = get_parameters()
    faces = smpl_params['f']  # canonical mesh faces
    print("faces shape: " + str(faces.shape))
    #exit(1)

    input_betas = Lambda(lambda x: x[:, 72:82])(optlearner_params)
    input_pose_rodrigues = Lambda(lambda x: x[:, 0:72])(optlearner_params)
    input_trans = Lambda(lambda x: x[:, 82:85])(optlearner_params)

    # Get the point cloud corresponding to these parameters
    optlearner_pc = Points3DFromSMPLParams(input_betas, input_pose_rodrigues,
                                           input_trans, smpl_params,
                                           input_info)
    print("optlearner point cloud shape: " + str(optlearner_pc.shape))
    #optlearner_pc = Lambda(lambda x: x * 0.0)[optlearner_pc]

    # Get the (batched) squared Euclidean distance between the learned and ground truth point clouds
    pc_euclidean_diff = Lambda(lambda x: x[0] - x[1])([gt_pc, optlearner_pc])
    pc_euclidean_dist = Lambda(lambda x: K.sum(K.square(x), axis=-1))(
        pc_euclidean_diff)
    print('pc euclidean dist ' + str(pc_euclidean_dist.shape))
    #exit(1)
    false_loss_pc = Lambda(lambda x: K.mean(x, axis=1))(pc_euclidean_dist)
    false_loss_pc = Reshape(target_shape=(1, ),
                            name="pc_mean_euc_dist")(false_loss_pc)
    print("point cloud loss shape: " + str(false_loss_pc.shape))
    #exit(1)

    #    # Get the (batched) MSE between the learned and ground truth point clouds
    #    false_loss_pc = Lambda(lambda x: tf.reduce_mean(tf.square(tf.subtract(x[0], x[1])), axis=[1,2]))([gt_pc, optlearner_pc])
    #    false_loss_pc = Reshape(target_shape=(1,), name="pointcloud_mse")(false_loss_pc)
    #    print("point cloud loss shape: " + str(false_loss_pc.shape))

    # Gather sets of points and compute their cross product to get mesh normals
    vertex_list = [1850, 1600, 2050, 5350, 5050, 5500]
    face_array = np.array(
        [face for face in faces for vertex in vertex_list if vertex in face])
    # (The inline normal computation, which gathers each face's three vertices,
    # forms two edge vectors and takes their normalised cross product, lives in
    # the get_mesh_normals helper; it can also be run over all faces rather
    # than face_array.)
    gt_normals = get_mesh_normals(gt_pc,
                                  face_array,
                                  layer_name="gt_cross_product")
    print("gt_normals shape: " + str(gt_normals.shape))

    opt_normals = get_mesh_normals(optlearner_pc,
                                   face_array,
                                   layer_name="opt_cross_product")
    print("opt_normals shape: " + str(opt_normals.shape))
    #exit(1)

    # Learn the offset in parameters from the difference between the ground truth and learned mesh normals
    diff_normals = Lambda(lambda x: K.tf.cross(x[0], x[1]),
                          name="diff_cross_product")([gt_normals, opt_normals])
    #pc_euclidean_diff_NOGRAD =  Lambda(lambda x: K.stop_gradient(x))(pc_euclidean_diff) # This is added to avoid influencing embedding layer parameters by a "bad" gradient network
    diff_normals_NOGRAD = Lambda(lambda x: K.stop_gradient(x))(
        diff_normals
    )  # This is added to avoid influencing embedding layer parameters by a "bad" gradient network
    print("diff_normals_NOGRAD shape: " + str(diff_normals_NOGRAD.shape))

    # Get the mesh normal angle magnitudes (for evaluation)
    gt_angles = Lambda(lambda x: K.tf.norm(x, axis=-1),
                       name="gt_angles")(gt_normals)
    print("gt_angles shape: " + str(gt_angles.shape))
    opt_angles = Lambda(lambda x: K.tf.norm(x, axis=-1),
                        name="opt_angles")(opt_normals)
    print("opt_angles shape: " + str(opt_angles.shape))
    diff_angles = Lambda(lambda x: K.tf.norm(x, axis=-1),
                         name="diff_angles")(diff_normals_NOGRAD)
    print("diff_angles shape: " + str(diff_angles.shape))
    #exit(1)

    # Optionally thin the normals, e.g. keep only every 10th entry:
    #diff_normals_NOGRAD = Lambda(lambda x: x[:, ::10], name="reduce_num_normals")(diff_normals_NOGRAD)
    diff_normals_NOGRAD = Flatten()(diff_normals_NOGRAD)

    optlearner_architecture = Dense(2**11,
                                    activation="relu")(diff_normals_NOGRAD)
    #optlearner_architecture = Dense(2**12, activation="relu")(diff_normals_NOGRAD)
    optlearner_architecture = BatchNormalization()(optlearner_architecture)
    optlearner_architecture = Dropout(0.5)(optlearner_architecture)
    #optlearner_architecture = Dense(2**10, activation="relu")(optlearner_architecture)
    #optlearner_architecture = BatchNormalization()(optlearner_architecture)
    print('optlearner_architecture shape: ' +
          str(optlearner_architecture.shape))
    #exit(1)
    #optlearner_architecture = Dropout(0.1)(optlearner_architecture)
    #optlearner_architecture = Dense(1024, activation="relu")(optlearner_architecture)
    #optlearner_architecture = Dropout(0.1)(optlearner_architecture)
    delta_d_hat = Dense(85, activation="linear",
                        name="delta_d_hat")(optlearner_architecture)
    print('delta_d_hat shape: ' + str(delta_d_hat.shape))
    #exit(1)

    # Calculate the (batched) MSE between the learned and ground truth offset in the parameters
    #false_loss_delta_d_hat = Lambda(lambda x: K.mean(K.square(x[0] - x[1]), axis=1))([delta_d, delta_d_hat])
    delta_d_NOGRAD = Lambda(lambda x: K.stop_gradient(x),
                            name="delta_d_NOGRAD")(delta_d)
    #false_loss_delta_d_hat = Lambda(lambda x: K.sum(K.square(x[0] - x[1]), axis=1))([delta_d_NOGRAD, delta_d_hat])
    false_loss_delta_d_hat = Lambda(
        lambda x: K.mean(K.square(x[0] - x[1]), axis=1))(
            [delta_d_NOGRAD, delta_d_hat])
    false_loss_delta_d_hat = Reshape(
        target_shape=(1, ), name="delta_d_hat_mse")(false_loss_delta_d_hat)
    print("delta_d_hat loss shape: " + str(false_loss_delta_d_hat.shape))

    # Prevent model from using the delta_d_hat gradient in final loss
    delta_d_hat_NOGRAD = Lambda(lambda x: K.stop_gradient(x),
                                name='optlearner_output_NOGRAD')(delta_d_hat)

    # False loss designed to pass the learned offset as a gradient to the embedding layer
    false_loss_smpl = Lambda(lambda x: x[1] * x[0], name="smpl_diff")(
        [optlearner_params, delta_d_hat_NOGRAD])
    print("smpl loss shape: " + str(false_loss_smpl.shape))

    #return [optlearner_input, gt_params, gt_pc], [optlearner_params, false_loss_delta_d, optlearner_pc, false_loss_pc, false_loss_delta_d_hat, false_loss_smpl, delta_d, delta_d_hat,delta_d_hat_NOGRAD]
    return [optlearner_input, gt_params, gt_pc], [
        optlearner_params, false_loss_delta_d, optlearner_pc, false_loss_pc,
        false_loss_delta_d_hat, false_loss_smpl, delta_d, delta_d_hat,
        delta_d_hat_NOGRAD, gt_angles, opt_angles, diff_angles
    ]
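# Hedged reconstruction (an assumption) of the external get_mesh_normals
# helper, based on the disabled inline version noted above: gather the three
# vertices of each selected face, form the two edge vectors and take their
# normalised cross product.
def get_mesh_normals_sketch(pc, face_array, layer_name="cross_product"):
    p0 = Lambda(lambda x: K.tf.gather(x, face_array[:, 0].astype(np.int32), axis=-2))(pc)
    p1 = Lambda(lambda x: K.tf.gather(x, face_array[:, 1].astype(np.int32), axis=-2))(pc)
    p2 = Lambda(lambda x: K.tf.gather(x, face_array[:, 2].astype(np.int32), axis=-2))(pc)
    vec1 = Lambda(lambda x: x[1] - x[0])([p0, p1])
    vec2 = Lambda(lambda x: x[1] - x[0])([p0, p2])
    return Lambda(lambda x: K.l2_normalize(K.tf.cross(x[0], x[1]), axis=-1),
                  name=layer_name)([vec1, vec2])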
def CustomEncoderArchitecture(input_shape):
    """ Specify the encoder's network architecture """
    encoder_inputs = Input(shape=input_shape)
    #encoder_architecture = encoder_inputs
    #vgg19 = VGG19(include_top=False, input_shape=input_shape, weights=None)
    #for layer in vgg19.layers:
    #    encoder_architecture = layer(encoder_architecture)

    # Network architecture (VGG19)
    # Block 1
    encoder_architecture = Conv2D(64, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_inputs)
    encoder_architecture = Conv2D(64, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = MaxPooling2D((2, 2))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Block 2
    encoder_architecture = Conv2D(128, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_architecture)
    encoder_architecture = Conv2D(128, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = MaxPooling2D((2, 2))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Block 3
    encoder_architecture = Conv2D(256, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_architecture)
    encoder_architecture = Conv2D(256, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_architecture)
    #encoder_architecture = Conv2D(256, (3, 3), padding="same", activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = MaxPooling2D((2, 2))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Block 4
    encoder_architecture = Conv2D(256, (3, 3),
                                  activation="relu")(encoder_architecture)
    encoder_architecture = Conv2D(256, (3, 3),
                                  activation="relu")(encoder_architecture)
    #encoder_architecture = Conv2D(256, (3, 3), activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = MaxPooling2D((2, 2))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Block 5
    encoder_architecture = Conv2D(512, (3, 3),
                                  activation="relu")(encoder_architecture)
    encoder_architecture = Conv2D(512, (3, 3),
                                  activation="relu")(encoder_architecture)
    #encoder_architecture = Conv2D(512, (3, 3), activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = MaxPooling2D((2, 2))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Block 6
    encoder_architecture = Conv2D(512, (3, 3),
                                  activation="relu")(encoder_architecture)
    #encoder_architecture = Conv2D(512, (3, 3), padding="same", activation="relu")(encoder_architecture)
    #encoder_architecture = Conv2D(512, (3, 3), padding="same", activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = AveragePooling2D((3, 3))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Dense layers
    encoder_architecture = Flatten()(encoder_architecture)
    encoder_architecture = Dense(1024)(encoder_architecture)
    encoder_architecture = Dropout(0.5)(encoder_architecture)
    encoder_architecture = Dense(512)(encoder_architecture)
    encoder_architecture = Dropout(0.5)(encoder_architecture)

    encoder_params = Dense(85, activation="tanh")(encoder_architecture)

    # Load SMPL model and get necessary parameters
    smpl_params = load_params(
        './keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
    )
    _, _, input_info = get_parameters()
    input_betas = Lambda(lambda x: x[:, 72:82])(encoder_params)
    input_pose_rodrigues = Lambda(lambda x: x[:, 0:72])(encoder_params)
    input_trans = Lambda(lambda x: x[:, 82:85])(encoder_params)

    encoder_mesh = Points3DFromSMPLParams(input_betas, input_pose_rodrigues,
                                          input_trans, smpl_params, input_info)
    # (Disabled alternative: wrapping Points3DFromSMPLParams in a single Lambda
    # with output_shape=(6890, 3) and slicing the parameters inline.)

    return [encoder_inputs], [encoder_params, encoder_mesh]
def LocEncoderArchitecture(img_shape, mesh_shape=(6890, 3)):
    """ Localised-learning encoder """
    encoder_inputs = Input(shape=img_shape)
    #encoder_architecture = encoder_inputs
    #vgg19 = VGG19(include_top=False, input_shape=input_shape, weights=None)
    #for layer in vgg19.layers:
    #    encoder_architecture = layer(encoder_architecture)

    # Network architecture (VGG19)
    # Block 1
    encoder_architecture = Conv2D(64, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_inputs)
    encoder_architecture = Conv2D(64, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = MaxPooling2D((2, 2))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Block 2
    encoder_architecture = Conv2D(128, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_architecture)
    encoder_architecture = Conv2D(128, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = MaxPooling2D((2, 2))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Block 3
    encoder_architecture = Conv2D(256, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_architecture)
    encoder_architecture = Conv2D(256, (3, 3),
                                  padding="same",
                                  activation="relu")(encoder_architecture)
    #encoder_architecture = Conv2D(256, (3, 3), padding="same", activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = MaxPooling2D((2, 2))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Block 4
    encoder_architecture = Conv2D(256, (3, 3),
                                  activation="relu")(encoder_architecture)
    encoder_architecture = Conv2D(256, (3, 3),
                                  activation="relu")(encoder_architecture)
    #encoder_architecture = Conv2D(256, (3, 3), activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = MaxPooling2D((2, 2))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Block 5
    encoder_architecture = Conv2D(512, (3, 3),
                                  activation="relu")(encoder_architecture)
    encoder_architecture = Conv2D(512, (3, 3),
                                  activation="relu")(encoder_architecture)
    #encoder_architecture = Conv2D(512, (3, 3), activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = MaxPooling2D((2, 2))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Block 6
    encoder_architecture = Conv2D(512, (3, 3),
                                  activation="relu")(encoder_architecture)
    #encoder_architecture = Conv2D(512, (3, 3), padding="same", activation="relu")(encoder_architecture)
    #encoder_architecture = Conv2D(512, (3, 3), padding="same", activation="relu")(encoder_architecture)
    encoder_architecture = BatchNormalization()(encoder_architecture)
    encoder_architecture = AveragePooling2D((3, 3))(encoder_architecture)
    encoder_architecture = Dropout(0.25)(encoder_architecture)

    # Dense layers
    encoder_architecture = Flatten()(encoder_architecture)
    encoder_architecture = Dense(1024)(encoder_architecture)
    encoder_architecture = Dropout(0.5)(encoder_architecture)
    encoder_architecture = Dense(512)(encoder_architecture)
    encoder_architecture = Dropout(0.5)(encoder_architecture)

    encoder_params = Dense(85, activation="tanh")(encoder_architecture)

    # Load SMPL model and get necessary parameters
    smpl_params = load_params(
        './keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
    )
    _, _, input_info = get_parameters()
    input_betas = Lambda(lambda x: x[:, 72:82])(encoder_params)
    input_pose_rodrigues = Lambda(lambda x: x[:, 0:72])(encoder_params)
    input_trans = Lambda(lambda x: x[:, 82:85])(encoder_params)

    encoder_mesh = Points3DFromSMPLParams(input_betas, input_pose_rodrigues,
                                          input_trans, smpl_params, input_info)

    loclearner_input = Input(shape=mesh_shape)
    flattened_input1 = Flatten()(loclearner_input)
    flattened_input2 = Flatten()(encoder_mesh)

    concat_inputs = Concatenate()([flattened_input1, flattened_input2])
    loclearner_architecture = Dense(1024)(concat_inputs)
    loclearner_output = Dense(3)(loclearner_architecture)

    # Prevent model from using the loclearner gradient
    loclearner_output_NOGRAD = Lambda(
        lambda x: K.stop_gradient(x),
        name='loclearner_output_NOGRAD')(loclearner_output)

    return [encoder_inputs, loclearner_input
            ], [encoder_params, encoder_mesh, loclearner_output_NOGRAD]
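# Hedged usage sketch (assumptions throughout): the param_trainable dict and
# init_wrapper that the OptLearnerStatic* architectures expect. Each of the 85
# SMPL parameters gets its own single-unit embedding layer, so parameters can
# be frozen individually; the wrapper must return a keras initializer for one
# parameter column.
def _demo_static_architecture_inputs():
    param_trainable = {"param_{:02d}".format(i): True for i in range(85)}

    def init_wrapper(param, offset):
        # One plausible wrapper: ignore `offset` (the source passes the
        # trainable flag through it) and initialise near zero.
        return RandomUniform(minval=-0.2, maxval=0.2, seed=param)

    return param_trainable, init_wrapper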
def OptLearnerStaticArchitecture(param_trainable, init_wrapper, emb_size=1000):
    """ Optimised learner network architecture """
    # An embedding layer is required to optimise the parameters
    optlearner_input = Input(shape=(1, ), name="embedding_index")

    # Initialise the embedding layers
    emb_layers = init_emb_layers(optlearner_input, emb_size, param_trainable,
                                 init_wrapper)
    optlearner_params = Concatenate(name="parameter_embedding")(emb_layers)
    optlearner_params = Reshape(target_shape=(85, ),
                                name="learned_params")(optlearner_params)
    print("optlearner parameters shape: " + str(optlearner_params.shape))
    #exit(1)

    # Ground truth parameters and point cloud are inputs to the model as well
    gt_params = Input(shape=(85, ), name="gt_params")
    gt_pc = Input(shape=(6890, 3), name="gt_pc")
    print("gt parameters shape: " + str(gt_params.shape))
    print("gt point cloud shape: " + str(gt_pc.shape))

    # Compute the true offset (i.e. difference) between the ground truth and learned parameters
    delta_d = Lambda(lambda x: x[0] - x[1],
                     name="delta_d")([gt_params, optlearner_params])
    print("delta_d shape: " + str(delta_d.shape))
    #exit(1)

    # Calculate the (batched) MSE between the learned parameters and the ground truth parameters
    false_loss_delta_d = Lambda(lambda x: K.mean(K.square(x), axis=1))(delta_d)
    print("delta_d loss shape: " + str(false_loss_delta_d.shape))
    #exit(1)
    false_loss_delta_d = Reshape(target_shape=(1, ),
                                 name="delta_d_mse")(false_loss_delta_d)
    print("delta_d loss shape: " + str(false_loss_delta_d.shape))

    # Load SMPL model and get necessary parameters
    smpl_params = load_params(
        './keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
    )
    _, _, input_info = get_parameters()
    input_betas = Lambda(lambda x: x[:, 72:82])(optlearner_params)
    input_pose_rodrigues = Lambda(lambda x: x[:, 0:72])(optlearner_params)
    input_trans = Lambda(lambda x: x[:, 82:85])(optlearner_params)

    # Get the point cloud corresponding to these parameters
    optlearner_pc = Points3DFromSMPLParams(input_betas, input_pose_rodrigues,
                                           input_trans, smpl_params,
                                           input_info)
    print("optlearner point cloud shape: " + str(optlearner_pc.shape))
    #exit(1)

    # Get the (batched) squared Euclidean distance between the learned and ground truth point clouds
    pc_euclidean_diff = Lambda(lambda x: x[0] - x[1])([gt_pc, optlearner_pc])
    pc_euclidean_dist = Lambda(lambda x: K.sum(K.square(x), axis=-1))(
        pc_euclidean_diff)
    print('pc euclidean dist ' + str(pc_euclidean_dist.shape))
    #exit(1)
    false_loss_pc = Lambda(lambda x: K.mean(x, axis=1))(pc_euclidean_dist)
    false_loss_pc = Reshape(target_shape=(1, ),
                            name="pc_mean_euc_dist")(false_loss_pc)
    print("point cloud loss shape: " + str(false_loss_pc.shape))

    # Learn the offset in parameters from the difference between the ground truth and learned point clouds
    pc_euclidean_diff_NOGRAD = Lambda(lambda x: K.stop_gradient(x))(
        pc_euclidean_diff
    )  # This is added to avoid influencing embedding layer parameters by a "bad" gradient network
    print("shape of output: " + str(pc_euclidean_diff_NOGRAD.shape))
    vertices = [1850, 1600, 2050, 1300, 5350, 5050, 5500]
    pc_euclidean_diff_NOGRAD = Lambda(lambda x: K.tf.gather(
        x, np.array(vertices).astype(np.int32), axis=-2))(
            pc_euclidean_diff_NOGRAD)
    print("shape of output: " + str(pc_euclidean_diff_NOGRAD.shape))
    #exit(1)
    pc_euclidean_diff_NOGRAD = Flatten()(pc_euclidean_diff_NOGRAD)

    optlearner_architecture = Dense(
        2**11, activation="relu")(pc_euclidean_diff_NOGRAD)
    optlearner_architecture = BatchNormalization()(optlearner_architecture)
    optlearner_architecture = Dropout(0.5)(optlearner_architecture)
    print('optlearner_architecture ' + str(optlearner_architecture.shape))
    #exit(1)
    delta_d_hat = Dense(85, activation="linear",
                        name="delta_d_hat")(optlearner_architecture)
    print('delta_d_hat shape ' + str(delta_d_hat.shape))
    #exit(1)

    # Calculate the (batched) MSE between the learned and ground truth offset in the parameters
    delta_d_NOGRAD = Lambda(lambda x: K.stop_gradient(x))(delta_d)
    false_loss_delta_d_hat = Lambda(
        lambda x: K.sum(K.square(x[0] - x[1]), axis=1))(
            [delta_d_NOGRAD, delta_d_hat])
    false_loss_delta_d_hat = Reshape(
        target_shape=(1, ), name="delta_d_hat_mse")(false_loss_delta_d_hat)
    print("delta_d_hat loss shape: " + str(false_loss_delta_d_hat.shape))
    false_sin_loss_delta_d_hat = get_sin_metric(delta_d_NOGRAD, delta_d_hat)
    print("delta_d_hat sin loss shape: " +
          str(false_sin_loss_delta_d_hat.shape))

    # Prevent model from using the delta_d_hat gradient in final loss
    delta_d_hat_NOGRAD = Lambda(lambda x: K.stop_gradient(x),
                                name='optlearner_output_NOGRAD')(delta_d_hat)

    # False loss designed to pass the learned offset as a gradient to the embedding layer
    false_loss_smpl = Lambda(lambda x: x[1] * x[0], name="smpl_diff")(
        [optlearner_params, delta_d_hat_NOGRAD])
    print("smpl loss shape: " + str(false_loss_smpl.shape))

    return [optlearner_input, gt_params, gt_pc], [
        optlearner_params, false_loss_delta_d, optlearner_pc, false_loss_pc,
        false_loss_delta_d_hat, false_sin_loss_delta_d_hat, false_loss_smpl,
        delta_d, delta_d_hat, delta_d_hat_NOGRAD
    ]
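# The get_sin_metric helper used above is defined elsewhere in the project. A
# plausible reconstruction (an assumption), in the spirit of the sine-based
# losses tried in OptLearnerStaticCosArchitecture: an error that vanishes when
# the predicted offset matches the true one modulo 2*pi.
def get_sin_metric_sketch(delta_d, delta_d_hat):
    false_sin_loss = Lambda(
        lambda x: K.mean(K.square(K.sin(0.5 * (x[0] - x[1]))), axis=1))(
            [delta_d, delta_d_hat])
    return Reshape(target_shape=(1,), name="delta_d_hat_sin_mse")(false_sin_loss)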