Пример #1
0
    def test_regularizer_context(self, X):
        """Check that UseRegularizer registers regularizers in RegularizerContext
        and that an FC layer built with them emits the expected regularization
        ops (LpNorm / Scale / LpNormGradient) in the generated train net.
        """
        weight_reg_out = L1Norm(0.2)
        bias_reg_out = L1Norm(0)
        regularizers = {
            'WEIGHT': weight_reg_out,
            'BIAS': bias_reg_out
        }

        output_dims = 2
        input_record = self.new_record(schema.Scalar((np.float32, (5,))))
        schema.FeedRecord(input_record, [X])

        with UseRegularizer(regularizers):
            # The context must hand back exactly the objects registered above.
            weight_reg = RegularizerContext.current().get_regularizer('WEIGHT')
            bias_reg = RegularizerContext.current().get_regularizer('BIAS')
            optim = SgdOptimizer(0.15)

            assert weight_reg == weight_reg_out, \
                'fail to get correct weight reg from context'
            assert bias_reg == bias_reg_out, \
                'fail to get correct bias reg from context'
            fc_output = self.model.FC(
                input_record,
                output_dims,
                weight_optim=optim,
                bias_optim=optim,
                weight_reg=weight_reg,
                bias_reg=bias_reg
            )
            # model.output_schema has to be a Struct
            self.model.output_schema = schema.Struct((
                'fc_output', fc_output
            ))

            self.assertEqual(
                schema.Scalar((np.float32, (output_dims, ))),
                fc_output
            )

            _, train_net = layer_model_instantiator.generate_training_nets(self.model)
            ops = train_net.Proto().op
            ops_type_list = [ops[i].type for i in range(len(ops))]
            # Two regularized blobs (weight + bias) — presumably one LpNorm and
            # one LpNormGradient each, plus the associated Scale ops.
            assert ops_type_list.count('LpNorm') == 2
            assert ops_type_list.count('Scale') == 4
            assert ops_type_list.count('LpNormGradient') == 2
Пример #2
0
    def test_regularizer_context(self, X):
        """Check that UseRegularizer registers regularizers in RegularizerContext
        and that an FC layer built with them emits the expected regularization
        ops (LpNorm / Scale / LpNormGradient) in the generated train net.
        """
        weight_reg_out = L1Norm(0.2)
        bias_reg_out = L1Norm(0)
        regularizers = {"WEIGHT": weight_reg_out, "BIAS": bias_reg_out}

        output_dims = 2
        input_record = self.new_record(schema.Scalar((np.float32, (5, ))))
        schema.FeedRecord(input_record, [X])

        with UseRegularizer(regularizers):
            # The context must hand back exactly the objects registered above.
            weight_reg = RegularizerContext.current().get_regularizer("WEIGHT")
            bias_reg = RegularizerContext.current().get_regularizer("BIAS")
            optim = SgdOptimizer(0.15)

            assert (weight_reg == weight_reg_out
                    ), "fail to get correct weight reg from context"
            assert bias_reg == bias_reg_out, "fail to get correct bias reg from context"
            fc_output = self.model.FC(
                input_record,
                output_dims,
                weight_optim=optim,
                bias_optim=optim,
                weight_reg=weight_reg,
                bias_reg=bias_reg,
            )
            # model.output_schema has to be a Struct
            self.model.output_schema = schema.Struct(("fc_output", fc_output))

            self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                             fc_output)

            _, train_net = layer_model_instantiator.generate_training_nets(
                self.model)
            ops = train_net.Proto().op
            ops_type_list = [ops[i].type for i in range(len(ops))]
            # Two regularized blobs (weight + bias) — presumably one LpNorm and
            # one LpNormGradient each, plus the associated Scale ops.
            assert ops_type_list.count("LpNorm") == 2
            assert ops_type_list.count("Scale") == 4
            assert ops_type_list.count("LpNormGradient") == 2
Пример #3
0
# Build model: build_adjoint_pinn returns the original-net prediction, the two
# adjoint predictions (sigmoid and tanh branches), and the training loss.
# NOTE(review): `model` is defined earlier in the file (outside this chunk).
(origin_pred, sig_adjoint_pred, tanh_adjoint_pred,
 loss) = build_adjoint_pinn(model,
                            sig_net_dim=[10, 1],
                            tanh_net_dim=[10, 1],
                            weight_optim=optimizer.AdagradOptimizer(
                                alpha=0.01,
                                epsilon=1e-4,
                            ),
                            bias_optim=optimizer.AdagradOptimizer(
                                alpha=0.01,
                                epsilon=1e-4,
                            ))

# Train the model: run the param-init net once, then execute the train net
# `num_iter` iterations per outer evaluation step, printing the loss record.
train_init_net, train_net = instantiator.generate_training_nets(model)
workspace.RunNetOnce(train_init_net)
workspace.CreateNet(train_net)
num_iter = 1000
eval_num_iter = 100
for i in range(eval_num_iter):
    workspace.RunNet(train_net.Proto().name, num_iter=num_iter)
    print(schema.FetchRecord(loss).get())

# Fetch the input features back from the workspace, reshaped to X's shape.
# NOTE(review): `X` comes from outside this chunk — presumably the training
# grid; confirm against the data-generation code above.
X_pred = np.squeeze(
    schema.FetchRecord(model.input_feature_schema.sig_input).get()).reshape(
        X.shape)
Y_pred = np.squeeze(
    schema.FetchRecord(model.input_feature_schema.tanh_input).get()).reshape(
        X.shape)
Пример #4
0
    def build_nets(
        self,
        hidden_sig_dims,
        hidden_tanh_dims,
        train_batch_size=1,
        eval_batch_size=1,
        weight_optim_method='AdaGrad',
        weight_optim_param=None,
        bias_optim_method='AdaGrad',
        bias_optim_param=None,
        loss_function='scaled_l1',
        max_loss_scale=1e6,
    ):
        """Build the training, prediction and (optional) evaluation nets.

        Args:
            hidden_sig_dims: layer dimensions of the sigmoid branch.
            hidden_tanh_dims: layer dimensions of the tanh branch.
            train_batch_size: batch size for the train data reader.
            eval_batch_size: batch size for the eval data reader.
            weight_optim_method, weight_optim_param: optimizer name and kwargs
                for weights (default: AdaGrad with alpha=0.01, epsilon=1e-4).
            bias_optim_method, bias_optim_param: same, for biases.
            loss_function: loss name forwarded to build_pinn.
            max_loss_scale: loss-scaling cap forwarded to build_pinn.

        Side effects: sets self.pred, self.loss, self.batch_size, and
        self.net_store['train_net' / 'pred_net' / 'eval_net'].
        """
        # Bug fix: the optimizer parameter dicts were mutable default
        # arguments (shared across all calls); resolve them per call instead.
        if weight_optim_param is None:
            weight_optim_param = {'alpha': 0.01, 'epsilon': 1e-4}
        if bias_optim_param is None:
            bias_optim_param = {'alpha': 0.01, 'epsilon': 1e-4}

        assert len(self.input_data_store) > 0, 'Input data store is empty.'
        assert 'train' in self.input_data_store, 'Missing training data.'
        self.batch_size = train_batch_size
        # Build the data reader net for train net
        input_data_train = data_reader.build_input_reader(
            self.model,
            self.input_data_store['train'][0],
            'minidb',
            ['sig_input', 'tanh_input', 'label'],
            batch_size=train_batch_size,
            data_type='train',
        )

        if 'eval' in self.input_data_store:
            # Build the data reader net for eval net
            input_data_eval = data_reader.build_input_reader(
                self.model,
                self.input_data_store['eval'][0],
                'minidb',
                ['eval_sig_input', 'eval_tanh_input', 'eval_label'],
                batch_size=eval_batch_size,
                data_type='eval',
            )

        # Build the computational nets.
        # Create train net: bind the reader outputs to the model schema so
        # the generated train net reads from them.
        self.model.input_feature_schema.sig_input.set_value(
            input_data_train[0].get(), unsafe=True)
        self.model.input_feature_schema.tanh_input.set_value(
            input_data_train[1].get(), unsafe=True)
        self.model.trainer_extra_schema.label.set_value(
            input_data_train[2].get(), unsafe=True)

        self.pred, self.loss = build_pinn(
            self.model,
            sig_net_dim=hidden_sig_dims,
            tanh_net_dim=hidden_tanh_dims,
            weight_optim=_build_optimizer(weight_optim_method,
                                          weight_optim_param),
            bias_optim=_build_optimizer(bias_optim_method, bias_optim_param),
            loss_function=loss_function,
            max_loss_scale=max_loss_scale)

        train_init_net, train_net = instantiator.generate_training_nets(
            self.model)
        workspace.RunNetOnce(train_init_net)
        workspace.CreateNet(train_net)
        self.net_store['train_net'] = train_net

        pred_net = instantiator.generate_predict_net(self.model)
        workspace.CreateNet(pred_net)
        self.net_store['pred_net'] = pred_net

        if 'eval' in self.input_data_store:
            # Create eval net: rebind the schema to the eval reader outputs.
            self.model.input_feature_schema.sig_input.set_value(
                input_data_eval[0].get(), unsafe=True)
            self.model.input_feature_schema.tanh_input.set_value(
                input_data_eval[1].get(), unsafe=True)
            self.model.trainer_extra_schema.label.set_value(
                input_data_eval[2].get(), unsafe=True)
            eval_net = instantiator.generate_eval_net(self.model)
            workspace.CreateNet(eval_net)
            self.net_store['eval_net'] = eval_net
Пример #5
0
    def build_nets(
        self,
        hidden_sig_dims,
        hidden_tanh_dims,
        train_batch_size=1,
        eval_batch_size=1,
        weight_optim_method='AdaGrad',
        weight_optim_param={
            'alpha': 0.01,
            'epsilon': 1e-4
        },
        bias_optim_method='AdaGrad',
        bias_optim_param={
            'alpha': 0.01,
            'epsilon': 1e-4
        },
        loss_function='scaled_l1',
        max_loss_scale=1.,
    ):
        """Build the training, prediction and (optional) evaluation nets.

        Supports two net builders (origin net vs. adjoint net) selected by
        self.net_builder; only self.train_target == TrainTarget.ORIGIN is
        implemented — ADJOINT raises.

        Side effects: sets self.pred, self.loss (and, for the adjoint
        builder, self.sig_adjoint_pred / self.tanh_adjoint_pred),
        self.batch_size, and self.net_store['train_net'/'pred_net'/'eval_net'].

        NOTE(review): weight_optim_param / bias_optim_param are mutable
        default dicts, shared across calls — safe only if never mutated.
        NOTE(review): self.net_builder is compared against TrainTarget
        members; presumably net_builder reuses the TrainTarget enum — verify.
        """
        assert len(self.input_data_store) > 0, 'Input data store is empty.'
        assert 'train' in self.input_data_store, 'Missing training data.'
        self.batch_size = train_batch_size

        # Build the data reader net for train net
        if self.train_target == TrainTarget.ORIGIN:
            input_data_train = data_reader.build_input_reader(
                self.model,
                self.input_data_store['train'][0],
                'minidb',
                ['sig_input', 'tanh_input', 'label'],
                batch_size=train_batch_size,
                data_type='train',
            )
            if 'eval' in self.input_data_store:
                # Build the data reader net for eval net
                input_data_eval = data_reader.build_input_reader(
                    self.model,
                    self.input_data_store['eval'][0],
                    'minidb',
                    ['eval_sig_input', 'eval_tanh_input', 'eval_label'],
                    batch_size=eval_batch_size,
                    data_type='eval',
                )

            if self.net_builder == TrainTarget.ADJOINT:  # Use Adjoint net so output adjoint net
                # for training origin, use origin_loss_record
                self.model.trainer_extra_schema.origin_loss_record.label.set_value(
                    input_data_train[2].get(), unsafe=True)
            elif self.net_builder == TrainTarget.ORIGIN:
                self.model.trainer_extra_schema.label.set_value(
                    input_data_train[2].get(), unsafe=True)

        if self.train_target == TrainTarget.ADJOINT:
            raise Exception('Not Implemented')

        # Build the computational nets
        # Create train net: bind the reader outputs to the model input schema.
        self.model.input_feature_schema.sig_input.set_value(
            input_data_train[0].get(), unsafe=True)
        self.model.input_feature_schema.tanh_input.set_value(
            input_data_train[1].get(), unsafe=True)

        if self.net_builder == TrainTarget.ADJOINT:
            (self.pred, self.sig_adjoint_pred, self.tanh_adjoint_pred,
             self.loss) = build_adjoint_pinn(
                 self.model,
                 sig_input_dim=self.sig_input_dim,
                 tanh_input_dim=self.tanh_input_dim,
                 sig_net_dim=hidden_sig_dims,
                 tanh_net_dim=hidden_tanh_dims,
                 weight_optim=_build_optimizer(weight_optim_method,
                                               weight_optim_param),
                 bias_optim=_build_optimizer(bias_optim_method,
                                             bias_optim_param),
                 adjoint_tag=self.adjoint_tag,
                 train_target=self.train_target,
                 loss_function=loss_function,
                 max_loss_scale=max_loss_scale,
             )
        elif self.net_builder == TrainTarget.ORIGIN:
            self.pred, self.loss = build_pinn(
                self.model,
                sig_net_dim=hidden_sig_dims,
                tanh_net_dim=hidden_tanh_dims,
                weight_optim=_build_optimizer(weight_optim_method,
                                              weight_optim_param),
                bias_optim=_build_optimizer(bias_optim_method,
                                            bias_optim_param),
                loss_function=loss_function,
                max_loss_scale=max_loss_scale,
            )

        train_init_net, train_net = instantiator.generate_training_nets(
            self.model)
        workspace.RunNetOnce(train_init_net)
        workspace.CreateNet(train_net)
        self.net_store['train_net'] = train_net

        pred_net = instantiator.generate_predict_net(self.model)
        workspace.CreateNet(pred_net)
        self.net_store['pred_net'] = pred_net

        if 'eval' in self.input_data_store:
            # Create eval net: rebind the schema to the eval reader outputs.
            self.model.input_feature_schema.sig_input.set_value(
                input_data_eval[0].get(), unsafe=True)
            self.model.input_feature_schema.tanh_input.set_value(
                input_data_eval[1].get(), unsafe=True)

            if self.train_target == TrainTarget.ORIGIN:
                if self.net_builder == TrainTarget.ADJOINT:
                    self.model.trainer_extra_schema.origin_loss_record.label.set_value(
                        input_data_eval[2].get(), unsafe=True)
                elif self.net_builder == TrainTarget.ORIGIN:
                    self.model.trainer_extra_schema.label.set_value(
                        input_data_eval[2].get(), unsafe=True)

            if self.train_target == TrainTarget.ADJOINT:
                raise Exception('Not Implemented')

            eval_net = instantiator.generate_eval_net(self.model)
            workspace.CreateNet(eval_net)
            self.net_store['eval_net'] = eval_net
Пример #6
0
 def run_train_net(self):
     """Instantiate the model's training nets and execute each of them once.

     The output schema is reset to an empty Struct first, since training
     does not need the prediction outputs.
     """
     self.model.output_schema = schema.Struct()
     init_net, net = layer_model_instantiator.generate_training_nets(
         self.model)
     for current in (init_net, net):
         workspace.RunNetOnce(current)
Пример #7
0
    def build_nets(
            self,
            hidden_sig_dims,
            hidden_tanh_dims,
            train_batch_size=1,
            eval_batch_size=1,
            weight_optim_method='AdaGrad',
            weight_optim_param={
                'alpha': 0.01,
                'epsilon': 1e-4
            },
            bias_optim_method='AdaGrad',
            bias_optim_param={
                'alpha': 0.01,
                'epsilon': 1e-4
            },
            loss_function='scaled_l1',
            max_loss_scale=1.,  # used to scale up the loss signal for small input
            neg_grad_penalty=None,  # whether and how to apply neg_grad_penalty
            init_model=None,  # do postfix matching i.e. adjoint/<blob_nanme> == <blob_nanme>
    ):
        """Build the training, prediction and (optional) evaluation nets.

        Like the plain variant, but additionally supports:
        - neg_grad_penalty: only valid when training the ORIGIN target with
          the ADJOINT net builder (enforced by the assertion below);
        - init_model: dict with 'name' (and optional 'prefix') used to load a
          previously exported init net and overwrite the default-initialized
          params after train_init_net has run.

        Side effects: sets self.pred, self.loss (plus the adjoint predictions
        for the adjoint builder), self.batch_size, self.has_neg_grad_penalty,
        and self.net_store['train_net'/'pred_net'/'eval_net'].

        NOTE(review): weight_optim_param / bias_optim_param are mutable
        default dicts, shared across calls — safe only if never mutated.
        NOTE(review): self.net_builder is compared against TrainTarget
        members; presumably net_builder reuses the TrainTarget enum — verify.
        """
        assert len(self.input_data_store) > 0, 'Input data store is empty.'
        assert 'train' in self.input_data_store, 'Missing training data.'
        assert (neg_grad_penalty is None or
                (neg_grad_penalty and self.train_target == TrainTarget.ORIGIN
                 and self.net_builder == TrainTarget.ADJOINT)
                ), '''When set neg_grad_penalty, train target should be ORIGIN,
            but net builder should be ADJOINT'''
        self.has_neg_grad_penalty = True if neg_grad_penalty else False
        self.batch_size = train_batch_size

        # Build the data reader net for train net
        if self.train_target == TrainTarget.ORIGIN:
            input_data_train = data_reader.build_input_reader(
                self.model,
                self.input_data_store['train'][0],
                'minidb',
                ['sig_input', 'tanh_input', 'label'],
                batch_size=train_batch_size,
                data_type='train',
            )
            if 'eval' in self.input_data_store:
                # Build the data reader net for eval net
                input_data_eval = data_reader.build_input_reader(
                    self.model,
                    self.input_data_store['eval'][0],
                    'minidb',
                    ['eval_sig_input', 'eval_tanh_input', 'eval_label'],
                    batch_size=eval_batch_size,
                    data_type='eval',
                )

            if self.net_builder == TrainTarget.ADJOINT:  # Use Adjoint net so output adjoint net
                # for training origin, use origin_loss_record
                self.model.trainer_extra_schema.origin_loss_record.label.set_value(
                    input_data_train[2].get(), unsafe=True)
            elif self.net_builder == TrainTarget.ORIGIN:
                self.model.trainer_extra_schema.label.set_value(
                    input_data_train[2].get(), unsafe=True)

        if self.train_target == TrainTarget.ADJOINT:
            raise Exception('Not Implemented')

        # Build the computational nets
        # Create train net: bind the reader outputs to the model input schema.
        self.model.input_feature_schema.sig_input.set_value(
            input_data_train[0].get(), unsafe=True)
        self.model.input_feature_schema.tanh_input.set_value(
            input_data_train[1].get(), unsafe=True)

        if self.net_builder == TrainTarget.ADJOINT:
            # decide adjoint tag: prediction-only unless the adjoint path is
            # needed for the neg-grad penalty during training
            adjoint_tag = 'no_tag'
            if self.train_target == TrainTarget.ORIGIN and neg_grad_penalty is None:
                adjoint_tag = Tags.PREDICTION_ONLY

            (self.pred, self.sig_adjoint_pred, self.tanh_adjoint_pred,
             self.loss) = build_adjoint_pinn(
                 self.model,
                 sig_input_dim=self.sig_input_dim,
                 tanh_input_dim=self.tanh_input_dim,
                 sig_net_dim=hidden_sig_dims,
                 tanh_net_dim=hidden_tanh_dims,
                 weight_optim=_build_optimizer(weight_optim_method,
                                               weight_optim_param),
                 bias_optim=_build_optimizer(bias_optim_method,
                                             bias_optim_param),
                 adjoint_tag=adjoint_tag,
                 train_target=self.train_target,
                 loss_function=loss_function,
                 max_loss_scale=max_loss_scale,
                 neg_grad_penalty=neg_grad_penalty,
             )
        elif self.net_builder == TrainTarget.ORIGIN:
            self.pred, self.loss = build_pinn(
                self.model,
                sig_net_dim=hidden_sig_dims,
                tanh_net_dim=hidden_tanh_dims,
                weight_optim=_build_optimizer(weight_optim_method,
                                              weight_optim_param),
                bias_optim=_build_optimizer(bias_optim_method,
                                            bias_optim_param),
                loss_function=loss_function,
                max_loss_scale=max_loss_scale,
            )

        train_init_net, train_net = instantiator.generate_training_nets(
            self.model)
        workspace.RunNetOnce(train_init_net)

        # Optionally overwrite the freshly initialized params with blobs from
        # a previously exported init net (name-matched, optionally prefixed).
        if init_model:
            model_name = init_model['name']
            print('[INFO] Init params from ' + model_name)
            given_init_net = exporter.load_init_net(model_name)
            if 'prefix' in init_model.keys():
                print('[INFO] Append ' + init_model['prefix'] +
                      ' to all blob names.')
                for op in given_init_net.op:
                    op.output[0] = init_model['prefix'] + op.output[0]
                workspace.RunNetOnce(given_init_net)

        workspace.CreateNet(train_net)
        self.net_store['train_net'] = train_net

        pred_net = instantiator.generate_predict_net(self.model)
        workspace.CreateNet(pred_net)
        self.net_store['pred_net'] = pred_net

        if 'eval' in self.input_data_store:
            # Create eval net: rebind the schema to the eval reader outputs.
            self.model.input_feature_schema.sig_input.set_value(
                input_data_eval[0].get(), unsafe=True)
            self.model.input_feature_schema.tanh_input.set_value(
                input_data_eval[1].get(), unsafe=True)

            if self.train_target == TrainTarget.ORIGIN:
                if self.net_builder == TrainTarget.ADJOINT:
                    self.model.trainer_extra_schema.origin_loss_record.label.set_value(
                        input_data_eval[2].get(), unsafe=True)
                elif self.net_builder == TrainTarget.ORIGIN:
                    self.model.trainer_extra_schema.label.set_value(
                        input_data_eval[2].get(), unsafe=True)

            if self.train_target == TrainTarget.ADJOINT:
                raise Exception('Not Implemented')

            eval_net = instantiator.generate_eval_net(self.model)
            workspace.CreateNet(eval_net)
            self.net_store['eval_net'] = eval_net
Пример #8
0
 def run_train_net(self):
     """Run one pass of training: generate the training nets and execute
     the init net followed by the train net, each exactly once.
     """
     # Training needs no prediction outputs, so clear the output schema.
     self.model.output_schema = schema.Struct()
     nets = layer_model_instantiator.generate_training_nets(self.model)
     train_init_net = nets[0]
     train_net = nets[1]
     workspace.RunNetOnce(train_init_net)
     workspace.RunNetOnce(train_net)
Пример #9
0
    def build_nets(
        self,
        hidden_dims,
        batch_size=1,
        optim_method='AdaGrad',
        optim_param=None,
    ):
        """Build the training, prediction and (optional) evaluation nets for
        the adjoint MLP.

        Args:
            hidden_dims: hidden-layer dimensions passed to build_adjoint_mlp.
            batch_size: batch size for both the train and eval data readers.
            optim_method, optim_param: optimizer name and kwargs (default:
                AdaGrad with alpha=0.01, epsilon=1e-4).

        Side effects: sets self.origin_pred, self.adjoint_pred, self.loss,
        self.batch_size, and self.net_store['train_net'/'pred_net'/'eval_net'].
        """
        # Bug fix: optim_param was a mutable default argument (one dict shared
        # across every call); resolve it per call instead.
        if optim_param is None:
            optim_param = {'alpha': 0.01, 'epsilon': 1e-4}

        assert len(self.input_data_store) > 0, 'Input data store is empty.'
        assert 'train' in self.input_data_store, 'Missing training data.'
        self.batch_size = batch_size
        # Build the data reader net for train net
        input_data_train = data_reader.build_input_reader(
            self.model,
            self.input_data_store['train'][0],
            'minidb',
            ['origin_input', 'adjoint_input', 'label'],
            batch_size=batch_size,
            data_type='train',
        )

        if 'eval' in self.input_data_store:
            # Build the data reader net for eval net
            input_data_eval = data_reader.build_input_reader(
                self.model,
                self.input_data_store['eval'][0],
                'minidb',
                ['origin_input', 'adjoint_input', 'label'],
                batch_size=batch_size,
                data_type='eval',
            )

        # Build the computational nets.
        # Create train net: bind the reader outputs to the model schema so
        # the generated train net reads from them.
        self.model.input_feature_schema.origin_input.set_value(
            input_data_train[0].get(), unsafe=True)
        self.model.input_feature_schema.adjoint_input.set_value(
            input_data_train[1].get(), unsafe=True)
        self.model.trainer_extra_schema.label.set_value(
            input_data_train[2].get(), unsafe=True)

        self.origin_pred, self.adjoint_pred, self.loss = build_adjoint_mlp(
            self.model,
            input_dim=self.input_dim,
            hidden_dims=hidden_dims,
            output_dim=self.output_dim,
            optim=_build_optimizer(optim_method, optim_param),
        )

        train_init_net, train_net = instantiator.generate_training_nets(
            self.model)
        workspace.RunNetOnce(train_init_net)
        workspace.CreateNet(train_net)
        self.net_store['train_net'] = train_net

        pred_net = instantiator.generate_predict_net(self.model)
        workspace.CreateNet(pred_net)
        self.net_store['pred_net'] = pred_net

        if 'eval' in self.input_data_store:
            # Create eval net: rebind the schema to the eval reader outputs.
            self.model.input_feature_schema.origin_input.set_value(
                input_data_eval[0].get(), unsafe=True)
            self.model.input_feature_schema.adjoint_input.set_value(
                input_data_eval[1].get(), unsafe=True)
            self.model.trainer_extra_schema.label.set_value(
                input_data_eval[2].get(), unsafe=True)
            eval_net = instantiator.generate_eval_net(self.model)
            workspace.CreateNet(eval_net)
            self.net_store['eval_net'] = eval_net