Example #1
    def __init__(self, input_size, output_size):
        super(PredictorOfTorque, self).__init__()
        self.encoder = Encoder(input_size)
        self.stepper = Stepper(self.encoder.out_size)
        self.decoder = Decoder(self.stepper.out_size, output_size)

        self.parameter_size = get_nn_params(self, True)
        add_save_load_optimize_optimizer_optim_context(PredictorOfTorque, self)
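The helpers get_nn_params and add_save_load_optimize_optimizer_optim_context are project-specific and their definitions are not shown in these examples. As a rough stand-in for the parameter-counting helper, here is a minimal sketch (the name count_parameters and the verbose flag are assumptions; the real signature may differ):

import torch.nn as nn

def count_parameters(module: nn.Module, verbose: bool = False) -> int:
    """Count trainable parameters of a torch module (hypothetical stand-in for get_nn_params)."""
    n = sum(p.numel() for p in module.parameters() if p.requires_grad)
    if verbose:
        print(f'{module.__class__.__name__}: {n} trainable parameters')
    return n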
Example #2
    def __init__(self, state_feature_size, target_feature_size, n_torque_predictors, expand_ratio=1):
        super(ActorAgregate, self).__init__()
        input_size = state_feature_size + target_feature_size
        output_size = n_torque_predictors
        self.fc0 = nn.Linear(input_size, input_size * expand_ratio)
        self.fc1 = nn.Linear(input_size * expand_ratio, output_size)

        n_parameter = get_nn_params(self, True)
        add_save_load_optimize_optimizer_optim_context(ActorAgregate, self)
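ActorAgregate's forward pass is not part of the example. A plausible sketch, given that the input is the concatenation of state and target features and the output has one value per torque predictor; the ReLU between layers and the softmax over predictors are assumptions, not taken from the source:

import torch
import torch.nn.functional as F

def forward(self, state_features, target_features):
    # Assumed: concatenate state and target features along the last dimension.
    x = torch.cat([state_features, target_features], dim=-1)
    x = F.relu(self.fc0(x))
    # Assumed: softmax so the outputs act as mixing weights over the torque predictors.
    return F.softmax(self.fc1(x), dim=-1)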
Example #3
    def __init__(self, input_size, output_size):
        super(Predictor, self).__init__()
        self.encoder = Encoder(input_size)
        self.stepper = Stepper(self.encoder.out_size)
        self.decoder = Decoder(self.stepper.out_size, output_size)
        self.parameter_size = get_nn_params(self, True)
        self.save_path = 'saved_models'

        self.optimizer = optim.Adam(self.parameters())
        self.criterion = nn.MSELoss()
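Unlike the other examples, Predictor keeps its optimizer and loss as plain attributes instead of using the add_save_load_optimize_optimizer_optim_context helper. A minimal training-step sketch that uses those attributes; it assumes Predictor defines a forward(x) through its encoder/stepper/decoder, which is not shown here:

def training_step(model, x, target):
    """One optimization step for a Predictor-style module with its own optimizer/criterion."""
    model.optimizer.zero_grad()
    prediction = model(x)                       # assumes Predictor.forward(x) exists
    loss = model.criterion(prediction, target)  # nn.MSELoss from the constructor
    loss.backward()
    model.optimizer.step()
    return loss.item()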
Example #4
    def __init__(self, input_size, output_size, critic=None):
        super(Actor, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.fc0 = nn.Linear(input_size, input_size*2)
        self.fc1 = nn.Linear(input_size*2, input_size*5)
        self.fc2 = nn.Linear(input_size*5, output_size)
        n_parameter = get_nn_params(self, True)

        add_save_load_optimize_optimizer_optim_context(Actor, self)
        self.critic = critic
Example #5
    def __init__(self, config_array):
        super(ActorP, self).__init__()
        self.config_array = config_array
        self.in_features = config_array[0]
        self.out_features = config_array[-1]
        fcs = [nn.Linear(i, j) for i, j in zip(config_array[:-1], config_array[1:])]
        self.fc_list = []
        for i, fc in enumerate(fcs):
            setattr(self, f'fc{i}', fc)
            self.fc_list.append(f'fc{i}')

        n_parameter = get_nn_params(self, True)

        add_save_load_optimize_optimizer_optim_context(ActorP, self)
        add_auto_save(ActorP, self)
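ActorP builds its layers from config_array and records their attribute names in self.fc_list, so a forward pass can walk that list with getattr. A hedged sketch of such a forward plus a usage example; the ReLU between hidden layers is an assumption:

import torch
import torch.nn.functional as F

def forward(self, x):
    # Walk the registered layers by name; ReLU between all but the last (assumed).
    for name in self.fc_list[:-1]:
        x = F.relu(getattr(self, name)(x))
    return getattr(self, self.fc_list[-1])(x)

# Usage sketch: 12 input features, two hidden layers of 64, 4 outputs.
# actor = ActorP([12, 64, 64, 4])
# out = actor(torch.randn(8, 12))   # batch of 8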
Example #6
    def __init__(self, in_size, compress_ratio=1.2, n_layer=1):
        super(Encoder, self).__init__()
        self.in_size = in_size
        self.compress_ratio = compress_ratio
        sizes = [
            math.floor(in_size * compress_ratio**i) for i in range(n_layer + 1)
        ]
        self.sizes = [*zip(sizes[:-1], sizes[1:])]
        self.out_size = sizes[-1]
        self.fcs_list = []
        for i, fc in enumerate(
            [nn.Linear(in_size, out_size)
             for in_size, out_size in self.sizes]):
            setattr(self, f'fc{i}', fc)
            self.fcs_list.append(f'fc{i}')
        self.parameter_size = get_nn_params(self)
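The Encoder's layer widths follow in_size * compress_ratio**i, so the geometry is easy to check by hand. For example, with in_size=15, compress_ratio=1.2 and n_layer=2 (illustrative values, not from the source):

import math

in_size, compress_ratio, n_layer = 15, 1.2, 2
sizes = [math.floor(in_size * compress_ratio**i) for i in range(n_layer + 1)]
# sizes == [15, 18, 21]  -> two Linear layers: (15 -> 18) and (18 -> 21), out_size == 21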
Example #7
    def save_network(self,
                     name='_snapshot',
                     best_p=None,
                     update_index=0,
                     history_errs=[]):
        if numpy.mod(update_index, self.__options['saveFreq']) == 0:
            print('Saving...')

            if best_p is not None:
                params = copy(best_p)
            else:
                params = utils.get_nn_params(self.nn_network.params())
            numpy.savez(self.__options['saveto'] + name,
                        history_errs=history_errs,
                        **params)
            pkl.dump(self.__options,
                     open('%s.pkl' % self.__options['saveto'], 'wb'))
            print('Done')
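save_network stores the parameters with numpy.savez (which appends a .npz extension) and the options with pickle under '<saveto>.pkl'. The matching loader is not shown in the example; a minimal sketch under those assumptions, with saveto and name taking the same values used when saving:

import pickle as pkl
import numpy

def load_network(saveto, name='_snapshot'):
    """Reload options and parameters written by save_network (sketch)."""
    options = pkl.load(open('%s.pkl' % saveto, 'rb'))
    archive = numpy.load(saveto + name + '.npz')
    history_errs = archive['history_errs']
    params = {k: archive[k] for k in archive.files if k != 'history_errs'}
    return options, params, history_errs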
Example #8
    def __init__(self,
                 input_size,
                 output_size,
                 torque_input_size=2,
                 expand_ratio=2,
                 n_layer=2,
                 base_rnn='gru'):
        super(PredictorLSTMTorque, self).__init__()
        self.cls = PredictorLSTMTorque
        self.input_size = input_size  # 15 (env_state - torque)
        self.stepwise_input_size = torque_input_size
        self.expand_ratio = expand_ratio
        self.n_layer = n_layer
        self.base_rnn = base_rnn

        self.encoder0 = Encoder(input_size,
                                compress_ratio=expand_ratio *
                                n_layer)  # for h_0
        if base_rnn == 'lstm':
            self.encoder1 = Encoder(input_size,
                                    compress_ratio=expand_ratio *
                                    n_layer)  # for c_0
            self.rnn = nn.LSTM(torque_input_size,
                               self.encoder0.out_size // n_layer,
                               n_layer,
                               batch_first=True)
        elif base_rnn == 'gru':
            self.rnn = nn.GRU(torque_input_size,
                              self.encoder0.out_size // n_layer,
                              n_layer,
                              batch_first=True)
        elif base_rnn == 'rnn':
            self.rnn = nn.RNN(torque_input_size,
                              self.encoder0.out_size // n_layer,
                              n_layer,
                              batch_first=True)
        else:
            raise AttributeError('unknown base rnn in PredictorLSTM')

        assert self.encoder0.out_size // n_layer == self.encoder0.out_size / n_layer  # sanity check
        self.decoder = Decoder(self.encoder0.out_size // n_layer, output_size)

        self.parameter_size = get_nn_params(self, True)
        add_save_load_optimize_optimizer_optim_context(self.cls, self)
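The forward pass of PredictorLSTMTorque is not included in the example. Based on the constructor (encoder0 produces the initial hidden state, the RNN consumes the torque sequence batch-first, and the decoder maps per-step hidden states to the output), a hedged GRU-only sketch might look like this; the interface and shapes are assumptions, and the LSTM branch would additionally need c_0 from encoder1:

def forward(self, env_state, torque_seq):
    # env_state:  (batch, input_size)                 -> initial hidden state via encoder0
    # torque_seq: (batch, seq_len, torque_input_size) -> stepwise RNN input
    batch = env_state.size(0)
    h0 = self.encoder0(env_state)                                        # (batch, out_size)
    h0 = h0.view(batch, self.n_layer, -1).transpose(0, 1).contiguous()   # (n_layer, batch, hidden)
    out, _ = self.rnn(torque_seq, h0)                                    # (batch, seq_len, hidden)
    return self.decoder(out)                                             # (batch, seq_len, output_size)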
Example #9
    def log_validation_loss(self, epoch_index):
        options = self.options
        if numpy.mod(self.update_index, self.options['validFreq']) == 0:
            self.use_noise.set_value(0.)
            train_err = 0
            valid_err = 0
            test_err = 0

            if self.__validate:
                valid_err = -Model.predict_probs(
                    self.f_probs, self.options, self.__worddict,
                    utils.extract_input, self.__validate,
                    self.kf_valid).mean()
            if self.__test:
                test_err = -Model.predict_probs(
                    self.f_probs, self.options, self.__worddict,
                    utils.extract_input, self.__test, self.kf_test).mean()

            self.history_errs.append([valid_err, test_err])

            # the model with the best validation log likelihood is saved separately under a different name
            if self.update_index == 0 or valid_err <= numpy.array(
                    self.history_errs)[:, 0].min():
                self.best_p = utils.get_nn_params(self.nn_params)

                print('Saving model with best validation ll')
                self.save_network(name='_bestll',
                                  best_p=self.best_p,
                                  update_index=self.update_index,
                                  history_errs=self.history_errs)
                self.bad_counter = 0

            # abort training if perplexity has been increasing for too long
            if epoch_index > options['patience'] and len(
                    self.history_errs
            ) > options['patience'] and valid_err >= numpy.array(
                    self.history_errs)[:-options['patience'], 0].min():
                self.bad_counter += 1
                if self.bad_counter > options['patience']:
                    print('Early Stop!')
                    self.epoch_stop = True

            print('Train ', train_err, 'Valid ', valid_err, 'Test ', test_err)
Example #10
    def __init__(self,
                 in_size,
                 out_size=None,
                 expand_ratio=10,
                 h_layer=1,
                 recurrent_step=0):
        super(Stepper, self).__init__()
        self.recurrent_step = recurrent_step
        self.in_size = in_size
        self.out_size = in_size if out_size is None else out_size
        sizes = [in_size] + [math.floor(in_size * expand_ratio)] * h_layer + [
            self.out_size
        ]
        self.sizes = [*zip(sizes[:-1], sizes[1:])]
        self.fcs_list = []
        # self.fc{i} = nn.Linear(in, out)
        for i, fc in enumerate(
            [nn.Linear(in_size, out_size)
             for in_size, out_size in self.sizes]):
            setattr(self, f'fc{i}', fc)
            self.fcs_list.append(f'fc{i}')
        self.parameter_size = get_nn_params(self)
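Stepper's widths are [in_size] + [floor(in_size * expand_ratio)] * h_layer + [out_size]. With the defaults (expand_ratio=10, h_layer=1) and an illustrative in_size=6, out_size left as None:

import math

in_size, expand_ratio, h_layer = 6, 10, 1
out_size = in_size                       # out_size=None falls back to in_size
sizes = [in_size] + [math.floor(in_size * expand_ratio)] * h_layer + [out_size]
# sizes == [6, 60, 6]  -> two Linear layers: (6 -> 60) and (60 -> 6)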