Example #1
    def __init__(self,
                 TARGET_OFFSET,
                 TARGET_PARAMS_TO_OFFSET,
                 PARAM_OFFSET,
                 PARAMS_TO_OFFSET,
                 SMPL,
                 BATCH_SIZE=1,
                 opt_lr=0.05,
                 opt_iter=5,
                 reward_factor=100,
                 reward_scale=0.2,
                 step_limit=100,
                 epsilon=1e-3):
        # Environment configuration parameters
        self.POSE_OFFSET = A2CEnv.format_offset_dict(TARGET_OFFSET,
                                                     TARGET_PARAMS_TO_OFFSET)
        self.PARAM_OFFSET = A2CEnv.format_offset_dict(PARAM_OFFSET,
                                                      PARAMS_TO_OFFSET)
        self.smpl = SMPL
        self.BATCH_SIZE = BATCH_SIZE

        # Parameters for batched TF implementation of SMPL model
        self.smpl_params, self.input_info, self.faces = load_smpl_params()

        # Reward configuration parameters
        self.opt_lr = opt_lr
        self.opt_iter = opt_iter
        self.reward_factor = reward_factor
        self.reward_scale = reward_scale
        self.step_limit = step_limit
        self.epsilon = epsilon

        # Per-sample environment state: targets, predictions, step counts, done flags and cumulative rewards
        self.target_params, self.target_pcs = self.generate_targets()
        self.pred_params, self.pred_pcs = self.generate_preds()
        self.steps_taken = np.zeros(self.BATCH_SIZE)
        self.done = np.full(self.BATCH_SIZE, False)
        self.reward_sum = np.zeros(self.BATCH_SIZE)

        # Initialise gradient descent model
        self.GD_model = self.init_GD_model()
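
A minimal instantiation sketch. The internal calls to A2CEnv.format_offset_dict suggest the method above belongs to a class named A2CEnv; the offset arguments, SMPL model and batch size below are purely hypothetical placeholders, since their exact formats are not shown in this excerpt.

# All values below are hypothetical and only illustrate the call signature.
target_offset = 0.5                     # placeholder offset for the target parameters
target_params_to_offset = ["param_59"]  # placeholder selection of target parameters
param_offset = 0.2                      # placeholder offset for the predicted parameters
params_to_offset = ["param_59"]         # placeholder selection of predicted parameters
smpl_model = ...                        # assumed to be an SMPL model object built elsewhere

env = A2CEnv(TARGET_OFFSET=target_offset,
             TARGET_PARAMS_TO_OFFSET=target_params_to_offset,
             PARAM_OFFSET=param_offset,
             PARAMS_TO_OFFSET=params_to_offset,
             SMPL=smpl_model,
             BATCH_SIZE=4)
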
Example #2
def construct_optlearner_model(ARCHITECTURE,
                               param_trainable,
                               emb_initialiser,
                               data_samples,
                               INPUT_TYPE,
                               GROUPS=[],
                               update_weight=1.0,
                               DROPOUT=0.0,
                               PARAM_2D_TRAINABLE=False,
                               INCLUDE_SHAPE=True):
    smpl_params, input_info, faces = load_smpl_params()
    print("Optimiser architecture: " + str(ARCHITECTURE))

    optlearner_inputs, optlearner_outputs = architecture_inputs_and_outputs(
        ARCHITECTURE, param_trainable, emb_initialiser, smpl_params,
        input_info, faces, data_samples, INPUT_TYPE, GROUPS, update_weight,
        DROPOUT, PARAM_2D_TRAINABLE, INCLUDE_SHAPE)
    print("optlearner inputs " + str(optlearner_inputs))
    print("optlearner outputs " + str(optlearner_outputs))
    optlearner_model = Model(inputs=optlearner_inputs,
                             outputs=optlearner_outputs)

    return optlearner_model
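
A sketch of how this constructor might be called from a training script. Every value below is a placeholder standing in for configuration that the surrounding project supplies; only the architecture name is taken from the dispatch code further down.

# Hypothetical configuration; param_trainable and emb_initialiser are assumed
# to have been prepared earlier in the training script.
optlearner_model = construct_optlearner_model(
    ARCHITECTURE="BasicFCOptLearnerStaticArchitecture",
    param_trainable=param_trainable,
    emb_initialiser=emb_initialiser,
    data_samples=10000,        # placeholder number of embedding samples
    INPUT_TYPE=INPUT_TYPE,     # placeholder; the accepted values are not shown in this excerpt
    GROUPS=[],
    update_weight=1.0,
    DROPOUT=0.0,
    PARAM_2D_TRAINABLE=False,
    INCLUDE_SHAPE=True)
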
#opt_lr = 0.5
#opt_epochs = 20
#opt_cb = OptimisationCallback(OPTIMISATION_PERIOD, opt_epochs, opt_lr, opt_logs_dir, smpl, X_data, train_inputs=X_cb, train_silh=silh_cb, pred_path=opt_vis_dir, period=1, trainable_params=trainable_params, visualise=False)


def print_summary_wrapper(path):
    # Return a closure that appends the string it is given to
    # <path>model_summary.txt and echoes it to stdout.
    def print_summary(s):
        with open(path + "model_summary.txt", 'a') as f:
            f.write(s)
            print(s)

    return print_summary
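
The returned closure takes a single string, which matches the print_fn hook of Keras's Model.summary, so a plausible use (a sketch with a placeholder log directory, assuming optlearner_model is a model such as the one returned by construct_optlearner_model above) is:

summary_path = "./logs/"   # placeholder; trailing separator matters, since the wrapper concatenates the filename
optlearner_model.summary(print_fn=print_summary_wrapper(summary_path))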


# Build and compile the model
smpl_params, input_info, faces = load_smpl_params()
print("Optimiser architecture: " + str(ARCHITECTURE))
if ARCHITECTURE == "OptLearnerMeshNormalStaticModArchitecture":
    optlearner_inputs, optlearner_outputs = OptLearnerMeshNormalStaticModArchitecture(
        param_trainable=param_trainable,
        init_wrapper=emb_initialiser,
        smpl_params=smpl_params,
        input_info=input_info,
        faces=faces,
        emb_size=data_samples,
        input_type=INPUT_TYPE)
elif ARCHITECTURE == "BasicFCOptLearnerStaticArchitecture":
    optlearner_inputs, optlearner_outputs = BasicFCOptLearnerStaticArchitecture(
        param_trainable=param_trainable,
        init_wrapper=emb_initialiser,
        smpl_params=smpl_params,