Example #1
0
def init_model_with_schemas(
    model_name, 
    sig_input_dim, tanh_input_dim,
    pred_dim
):
    """Build a fresh LayerModelHelper with sig/tanh input and pred output.

    Resets the global workspace first, so blobs from any earlier model
    are discarded. The trainer-extra schema doubles as the loss input
    record (label + prediction, both pred_dim wide).
    """
    workspace.ResetWorkspace()

    def _float_vec(dim):
        # Shorthand: a float32 vector scalar of the given width.
        return schema.Scalar((np.float32, (dim, )))

    input_record_schema = schema.Struct(
        ('sig_input', _float_vec(sig_input_dim)),
        ('tanh_input', _float_vec(tanh_input_dim)),
    )
    output_record_schema = schema.Struct(
        ('loss', _float_vec(1)),
        ('pred', _float_vec(pred_dim)),
    )
    # trainer_extra_schema serves as the loss input record
    trainer_extra_schema = schema.Struct(
        ('label', _float_vec(pred_dim)),
        ('prediction', _float_vec(pred_dim)),
    )

    model = layer_model_helper.LayerModelHelper(
        model_name, input_record_schema, trainer_extra_schema)
    model.output_schema = output_record_schema
    return model
Example #2
0
 def reset_model(self, input_feature_schema=None, trainer_extra_schema=None):
     """Replace self.model with a fresh LayerModelHelper named 'test_model'.

     Falsy/None arguments fall back to defaults: a single 32-wide
     float32 'float_features' input and an empty trainer-extra schema.
     """
     default_input = schema.Struct(
         ('float_features', schema.Scalar((np.float32, (32,)))),
     )
     # NOTE: `or` (not `is None`) on purpose — a falsy schema also
     # falls back to the default, matching the original contract.
     input_feature_schema = input_feature_schema or default_input
     trainer_extra_schema = trainer_extra_schema or schema.Struct()
     self.model = layer_model_helper.LayerModelHelper(
         'test_model',
         input_feature_schema=input_feature_schema,
         trainer_extra_schema=trainer_extra_schema)
Example #3
0
    def setUp(self):
        """Create a minimal test model: one 32-dim float input, no extras."""
        super(TestLayers, self).setUp()
        self.model = layer_model_helper.LayerModelHelper(
            'test_model',
            input_feature_schema=schema.Struct(
                ('float_features', schema.Scalar((np.float32, (32, )))),
            ),
            trainer_extra_schema=schema.Struct(),
        )
Example #4
0
def init_model_with_schemas(
    model_name,
    input_dim,
    output_dim,
):
    """Build a LayerModelHelper for an origin/adjoint network pair.

    Resets the global workspace first. The trainer-extra schema doubles
    as the loss input record; for the adjoint net the prediction width
    equals the original input width.
    """
    workspace.ResetWorkspace()

    def _float_vec(dim):
        # Shorthand: a float32 vector scalar of the given width.
        return schema.Scalar((np.float32, (dim, )))

    input_record_schema = schema.Struct(
        ('origin_input', _float_vec(input_dim)),
        ('adjoint_input', _float_vec(output_dim)),
    )
    output_record_schema = schema.Struct(
        ('loss', _float_vec(input_dim)),
        ('origin_pred', _float_vec(output_dim)),
        ('adjoint_pred', _float_vec(input_dim)),
    )
    # for adjoint nn, the pred_dim is the same as original input
    pred_dim = input_dim
    # use trainer_extra_schema as the loss input record
    trainer_extra_schema = schema.Struct(
        ('label', _float_vec(pred_dim)),
        ('prediction', _float_vec(pred_dim)),
    )

    model = layer_model_helper.LayerModelHelper(
        model_name, input_record_schema, trainer_extra_schema)
    model.output_schema = output_record_schema
    return model
Example #5
0
def init_model_with_schemas(model_name,
                            sig_input_dim,
                            tanh_input_dim,
                            pred_dim,
                            train_target=TrainTarget.ADJOINT):
    '''Create a LayerModelHelper for training the adjoint or origin net.

    output_records have to be filled with existing blobs.

    Args:
        model_name: name passed to LayerModelHelper.
        sig_input_dim: width of the sigmoid-branch input vector.
        tanh_input_dim: width of the tanh-branch input vector.
        pred_dim: width of the origin network's prediction.
        train_target: TrainTarget.ADJOINT or TrainTarget.ORIGIN.

    Returns:
        A LayerModelHelper with input/trainer schemas set and
        model.output_schema attached.

    Raises:
        ValueError: if train_target is not a recognized TrainTarget.
            (Previously an unknown target crashed later with
            UnboundLocalError.)
    '''
    workspace.ResetWorkspace()
    print('>>> Training Target: ' + train_target)

    # Both training targets consume the identical input record, so it is
    # built once instead of duplicated per branch.
    input_record_schema = schema.Struct(
        ('sig_input', schema.Scalar((np.float32, (sig_input_dim, )))),  # sig
        ('tanh_input', schema.Scalar(
            (np.float32, (tanh_input_dim, )))),  # tanh
        ('adjoint_input', schema.Scalar((np.float32, (pred_dim, )))))

    def _loss_record(dim):
        # A (label, prediction) pair of float32 vectors, used as the
        # input record of one loss term.
        return schema.Struct(
            ('label', schema.Scalar((np.float32, (dim, )))),
            ('prediction', schema.Scalar((np.float32, (dim, )))))

    if train_target == TrainTarget.ADJOINT:
        # When training the adjoint network, we also need to forward pass
        # through the origin network
        output_record_schema = schema.Struct(
            ('loss', schema.Scalar((np.float32, (1, )))),
            ('origin_pred', schema.Scalar((np.float32, (pred_dim, )))),
            ('sig_adjoint_pred', schema.Scalar(
                (np.float32, (sig_input_dim, )))),
            ('tanh_adjoint_pred', schema.Scalar(
                (np.float32, (tanh_input_dim, )))),
        )
        # use trainer_extra_schema as the loss input record
        trainer_extra_schema = schema.Struct(
            ('sig_loss_record', _loss_record(sig_input_dim)),
            ('tanh_loss_record', _loss_record(tanh_input_dim)),
            ('origin_loss_record', _loss_record(pred_dim)),
        )
    elif train_target == TrainTarget.ORIGIN:
        # When training the origin network, no need of the adjoint network
        output_record_schema = schema.Struct(
            ('loss', schema.Scalar((np.float32, (1, )))),
            ('origin_pred', schema.Scalar((np.float32, (pred_dim, )))),
        )
        # use trainer_extra_schema as the loss input record
        trainer_extra_schema = schema.Struct(
            ('origin_loss_record', _loss_record(pred_dim)),
        )
    else:
        raise ValueError('Unknown train_target: {}'.format(train_target))

    model = layer_model_helper.LayerModelHelper(model_name,
                                                input_record_schema,
                                                trainer_extra_schema)
    model.output_schema = output_record_schema
    return model