Code Example #1
File: bamboo.py  Project: zkmartin/tframe
 def _train_to_the_top(self,
                       *args,
                       branch_index_s=0,
                       branch_index_e=0,
                       lr_list=None,
                       **kwargs):
     if lr_list is None: lr_list = [0.000088] * (self.branches_num + 1)
     if branch_index_e == 0:
         train_end_index = self.branches_num + 1
     else:
         train_end_index = branch_index_e + 1
     for i in range(branch_index_s, train_end_index):
         self.set_branch_index(i)
         # TODO: modify the learning rate of the optimizer
         if i > 0:
             FLAGS.overwrite = False
             FLAGS.save_best = True
             self.launch_model(FLAGS.overwrite and FLAGS.train)
             if i == self.branches_num:
                 self._branches_variables_assign(0, output=True)
             else:
                 self._branches_variables_assign(i)
         self._optimizer_lr_modify(lr_list[i])
         self._train_step = self._optimizer.minimize(
             loss=self._losses[i], var_list=self._var_list[i])
         Predictor.train(self, *args, **kwargs)
     FLAGS.overwrite = False
     FLAGS.save_best = True
     self.launch_model(FLAGS.overwrite and FLAGS.train)
     self.set_branch_index(branch_index_e)
     self._optimizer_lr_modify(lr_list[0] * 0.1)
     self._train_step = self._optimizer.minimize(loss=self._loss)
     Predictor.train(self, *args, **kwargs)
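
A minimal usage sketch for the method above, assuming `model` is a built Bamboo instance; the data keyword arguments (`training_set`, `validation_set`, `batch_size`, `epoch`) are borrowed from the `Predictor.train` call shown in Code Example #13 and are illustrative only:

# Hypothetical call: train every branch from the first one up to the top,
# forwarding the usual Predictor.train keyword arguments.
model._train_to_the_top(
    branch_index_s=0,
    branch_index_e=0,                                  # 0 means "train up to the last branch"
    lr_list=[1e-4] * (model.branches_num + 1),
    training_set=training_set,
    validation_set=val_set,
    batch_size=64,
    epoch=5)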
Code Example #2
File: bamboo.py  Project: zkmartin/tframe
    def _train(self,
               *args,
               branch_index=0,
               freeze_index=-1,
               lr_list=None,
               **kwargs):
        self.set_branch_index(branch_index)
        # TODO
        branch_train = kwargs.get('branch_train', False)
        if branch_train:
            self._train_step = self._optimizer.minimize(
                loss=self._losses[branch_index],
                var_list=self.branch_var_list[branch_index])
        else:
            if freeze_index == -1:
                self._train_step = self._optimizer.minimize(self._loss)
            else:
                trained_net = self.trunk_net[freeze_index + 1:branch_index + 1]
                var_list = [net.var_list for net in trained_net]
                if lr_list is not None:
                    self._train_step = self.layer_lr(lr_list=lr_list,
                                                     var_list=var_list)
                else:
                    self._train_step = self._optimizer.minimize(
                        loss=self._loss, var_list=var_list)

        # Call parent's train method
        Predictor.train(self, *args, **kwargs)
Code Example #3
File: bamboo.py  Project: zkmartin/tsframe
 def train(self, *args, branch_index=0, **kwargs):
   self.set_branch_index(branch_index)
   # TODO
   freeze = kwargs.get('freeze', True)
   if not freeze:
     self.train_step.substitute(self._optimizer.minimize(self.loss.tensor))
   # Call parent's train method
   Predictor.train(self, *args, **kwargs)
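
A short, hypothetical call of the `train` override above; `model`, `training_set` and `val_set` are placeholders. Note that the override reads `freeze` from `kwargs` without popping it, so `Predictor.train` is assumed to tolerate the extra keyword:

# Select branch 1 and re-minimize the full loss instead of the frozen one.
model.train(branch_index=1, freeze=False,
            training_set=training_set, validation_set=val_set,
            batch_size=64, epoch=2)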
Code Example #4
File: bamboo.py  Project: zkmartin/tsframe
 def __init__(self, mark=None):
   # Call parent's initializer
   Predictor.__init__(self, mark)
   # Private fields
   self._branch_index = -1
   self._output_list = []
   self._losses = []
   self._train_ops = []
   self._metrics = []
Code Example #5
File: bamboo.py  Project: zkmartin/tframe
 def __init__(self, mark=None, **kwargs):
     # Call parent's initializer
     Predictor.__init__(self, mark)
     # Private fields
     self._output_list = []
     self._losses = []
     self._metrics = []
     self._train_ops = []
     self._var_list = []
     self._branch_index = 0
     self._identity_initial = kwargs.get('identity', False)
Code Example #6
 def __init__(self, mark=None):
     # Call parent's initializer
     Predictor.__init__(self, mark)
     # Private attributes
     # .. Options
     self.strict_residual = True
     # .. tframe objects
     self._master = 0
     self._boutputs = []
     self._losses = []
     self._train_steps = []
     self._metrics = []
     self._train_step_summaries = []
     self._validation_summaries = []
Code Example #7
File: model_lib.py  Project: zkmartin/Audio-Tagging
def lstm(th):
    assert isinstance(th, Config)
    # Initiate model
    th.mark = 'lstm_' + th.mark
    model = Predictor(mark=th.mark, net_type=Recurrent)

    # Add input layer
    model.add(Input(sample_shape=[th.memory_depth]))
    # Add hidden layers
    for _ in range(th.num_blocks):
        model.add(BasicLSTMCell(th.hidden_dim, with_peepholes=False))
    # Add output layer
    model.add(Linear(output_dim=1))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build_as_regressor(optimizer)

    return model
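
A sketch of how `lstm` might be driven, assuming `Config` is default-constructible and exposes the fields the function reads (`mark`, `memory_depth`, `num_blocks`, `hidden_dim`, `learning_rate`); the values below are placeholders:

th = Config()              # assumed to be default-constructible
th.mark = 'demo'
th.memory_depth = 40
th.num_blocks = 2
th.hidden_dim = 64
th.learning_rate = 1e-3

model = lstm(th)           # returns a recurrent Predictor built as a regressor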
Code Example #8
File: neural_net.py  Project: zkmartin/nls
  def __init__(self, memory_depth, mark='nn', degree=None, **kwargs):
    # Sanity check
    if memory_depth < 1: raise ValueError('!! Memory depth should be positive')

    # Call parent's construction methods
    Model.__init__(self)

    # Initialize fields
    self.memory_depth = memory_depth
    self.D = memory_depth
    self.degree = degree
    # TODO: compromise
    bamboo = kwargs.get('bamboo', False)
    bamboo_broad = kwargs.get('bamboo_broad', False)
    identity_initial = kwargs.get('identity_initial', False)
    if degree is not None:
      self.nn = VolterraNet(degree, memory_depth, mark, **kwargs)
    elif bamboo:
      self.nn = Bamboo(mark=mark, identity=identity_initial)
    elif bamboo_broad:
      self.nn = Bamboo_Broad(mark=mark, inter_type=pedia.fork,
                             identity=identity_initial)
    else: self.nn = Predictor(mark=mark)
Code Example #9
    def __init__(self, memory_depth, mark='nn', degree=None, **kwargs):
        # Sanity check
        if memory_depth < 1:
            raise ValueError('!! Memory depth should be positive')

        # Call parent's construction methods
        Model.__init__(self)

        # Initialize fields
        self.memory_depth = memory_depth
        self.D = memory_depth
        self.degree = degree
        # TODO: compromise
        bamboo = kwargs.get('bamboo', False)
        nn_class = kwargs.get('nn_class', None)
        if nn_class is not None:
            self.nn = nn_class(mark=mark)
        elif degree is not None:
            self.nn = VolterraNet(degree, memory_depth, mark, **kwargs)
        elif bamboo:
            self.nn = Bamboo(mark=mark)
        else:
            self.nn = Predictor(mark=mark)
Code Example #10
def typical(th, cells):
  assert isinstance(th, Config)
  # Initiate a model
  model = Predictor(mark=th.mark, net_type=Recurrent)
  # Add layers
  model.add(Input(sample_shape=th.input_shape))
  # Add hidden layers
  if not isinstance(cells, (list, tuple)): cells = [cells]
  for cell in cells: model.add(cell)
  # Build model and return
  output_and_build(model, th)
  return model
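
Under the same assumptions about `Config`, `typical` could be combined with any cell list, for example the `BasicLSTMCell` used in Code Example #7 (a sketch, not taken from the project; `output_and_build` is assumed to add the output layer and build the model):

cells = [BasicLSTMCell(th.hidden_dim, with_peepholes=False)
         for _ in range(th.num_blocks)]
model = typical(th, cells)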
Code Example #11
def rnn0(th):
    assert isinstance(th, NlsHub)
    # Initiate a neural net model
    nn_class = lambda mark: Predictor(mark=mark, net_type=Recurrent)
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=nn_class)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input(sample_shape=[th.memory_depth]))
    for _ in range(th.num_blocks):
        nn.add(BasicRNNCell(state_size=th.hidden_dim))
    nn.add(Linear(output_dim=1))

    # Build
    model.default_build(th.learning_rate)

    return model
Code Example #12
File: bamboo.py  Project: zkmartin/tframe
 def predict(self, data, **kwargs):
     index = kwargs.get('branch_index', 0)
     self.set_branch_index(index)
     # Call parent's predict method
     return Predictor.predict(self, data, **kwargs)
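
A one-line usage sketch of the override above; `model` and `test_data` are placeholders:

predictions = model.predict(test_data, branch_index=2)   # run inference through branch #2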
Code Example #13
File: neural_net.py  Project: zkmartin/nls
class NeuralNet(Model):
  """A model for non-linear system based on neural network"""

  def __init__(self, memory_depth, mark='nn', degree=None, **kwargs):
    # Sanity check
    if memory_depth < 1: raise ValueError('!! Memory depth should be positive')

    # Call parent's construction methods
    Model.__init__(self)

    # Initialize fields
    self.memory_depth = memory_depth
    self.D = memory_depth
    self.degree = degree
    # TODO: compromise
    bamboo = kwargs.get('bamboo', False)
    bamboo_broad = kwargs.get('bamboo_broad', False)
    identity_initial = kwargs.get('identity_initial', False)
    if degree is not None:
      self.nn = VolterraNet(degree, memory_depth, mark, **kwargs)
    elif bamboo:
      self.nn = Bamboo(mark=mark, identity=identity_initial)
    elif bamboo_broad:
      self.nn = Bamboo_Broad(mark=mark, inter_type=pedia.fork,
                             identity=identity_initial)
    else: self.nn = Predictor(mark=mark)

  # region : Public Methods

  def default_build(self, learning_rate=0.001, optimizer=None):
    if optimizer is None:
      optimizer = tf.train.AdamOptimizer(learning_rate)
    self.nn.build(loss='euclid', metric='ratio', metric_name='Err %',
                  optimizer=optimizer)

  def inference(self, input_, **kwargs):
    if not self.nn.built:
      raise AssertionError('!! Model has not been built yet')
    mlp_input = self._gen_mlp_input(input_)
    tfinput = TFData(mlp_input)
    output = self.nn.predict(tfinput, **kwargs).flatten()

    output = Signal(output)
    output.__array_finalize__(input_)
    return output

  def identify(self, training_set, val_set=None, probe=None,
               batch_size=64, print_cycle=100, snapshot_cycle=1000,
               snapshot_function=None, epoch=1, **kwargs):
    # Train
    self.nn.train(batch_size=batch_size, training_set=training_set,
                  validation_set=val_set, print_cycle=print_cycle,
                  snapshot_cycle=snapshot_cycle, epoch=epoch, probe=probe,
                  snapshot_function=snapshot_function, **kwargs)

  def evaluate(self, dataset, start_at=0, plot=False, **kwargs):
    # Check input
    if not isinstance(dataset, DataSet):
      raise TypeError('!! Input data set must be an instance of DataSet')
    if dataset.responses is None:
      raise ValueError('!! input data set should have responses')
    u, y = dataset.signls[0], dataset.responses[0]

    # Show status
    console.show_status('Evaluating {}'.format(dataset.name))

    # Evaluate
    system_output = y[start_at:]
    model_output = self(u, **kwargs)[start_at:]
    err = system_output - model_output
    ratio = lambda val: 100 * val / system_output.rms

    # The mean value of the simulation error in time domain
    val = err.average
    console.supplement('E[err] = {:.4f} ({:.3f}%)'.format(val, ratio(val)))
    # The standard deviation of the error in time domain
    val = float(np.std(err))
    console.supplement('STD[err] = {:.4f} ({:.3f}%)'.format(val, ratio(val)))
    # The root mean square value of the error in time domain
    val = err.rms
    console.supplement('RMS[err] = {:.6f} ({:.3f}%)'.format(val, ratio(val)))

    # Plot
    if not plot: return
    fig = Figure('Simulation Error')
    # Add ground truth
    prefix = 'System Output, $||y|| = {:.4f}$'.format(system_output.norm)
    fig.add(Subplot.PowerSpectrum(system_output, prefix=prefix))
    # Add model output
    prefix = r'Model Output, $||\Delta|| = {:.4f}$'.format(err.norm)
    fig.add(Subplot.PowerSpectrum(model_output, prefix=prefix, Error=err))
    # Plot
    fig.plot(ylim=True)


  def gen_snapshot_function(self, input_, response):
    from signals.utils import Figure, Subplot

    # Sanity check
    if not isinstance(input_, Signal) or not isinstance(response, Signal):
      raise TypeError('!! Input and response should be instances of Signal')

    def snapshot_function(obj):
      assert isinstance(obj, (Predictor, VolterraNet))
      pred = self(input_)
      delta = pred - response

      fig = Figure()
      fig.add(Subplot.PowerSpectrum(response, prefix='Ground Truth'))
      prefix = r'Predicted, $||\Delta||$ = {:.4f}'.format(delta.norm)
      fig.add(Subplot.PowerSpectrum(pred, prefix=prefix, Delta=delta))

      return fig.plot(show=False, ylim=True)

    return snapshot_function

  # endregion : Public Methods

  # region : Private Methods

  def _gen_mlp_input(self, input_):
    return input_.causal_matrix(self.memory_depth)

  # endregion : Private Methods


  """For some reason, do not remove this line"""
Code Example #14
 def predict(self, data, branch_index=0, additional_fetches=None):
     self._outputs.plug(self._boutputs[branch_index].tensor)
     return Predictor.predict(self, data, additional_fetches)