Example #1
    def chop_with_stride(x, y, size, stride, rand_shift=True):
        assert isinstance(x, np.ndarray) and isinstance(y, np.ndarray)
        checker.check_type([size, stride], int)

        out_len = SignalSet.chop_with_stride_len_f(len(x), size, stride)
        x_out = np.zeros(shape=(out_len, size))
        y_out = np.zeros(shape=(out_len, *y.shape[1:]))

        if rand_shift:
            remain = len(x) - ((out_len - 1) * stride + size)
            shift = np.random.randint(remain + 1)
        else:
            shift = 0
        for i in range(out_len):
            # Fill in x
            x_out[i] = x[shift + stride * i:shift + stride * i + size]
            # Fill in y if necessary
            if len(x) == len(y):
                y_out[i] = y[shift + stride * i:shift + stride * i + size]

        if len(x) != len(y):
            assert len(y) == 1
            y_out = np.tile(y, (out_len, 1))

        return x_out, y_out
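Note that `SignalSet.chop_with_stride_len_f` is not shown in this snippet. Assuming it implements the usual sliding-window count, a minimal sketch of the arithmetic:

    # Presumed sliding-window count (an assumption, since chop_with_stride_len_f
    # is not part of the snippet above):
    def chop_len(total, size, stride):
        return (total - size) // stride + 1

    assert chop_len(10, 4, 2) == 4  # windows start at offsets 0, 2, 4, 6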
Example #2
 def _check_signals(signals, responses):
     # Check signals
     if isinstance(signals, Signal): signals = [signals]
     checker.check_type(signals, Signal)
     signal_dict = {}
     # Make sure all signals are sampled at the same fs
     fs = signals[0].fs
     for i in range(1, len(signals)):
         if fs != signals[i].fs:
             raise ValueError(
                 '!! All signals in SignalSet must have the same sampling frequency'
             )
     signal_dict[pedia.signals] = signals
     # Check responses
     if responses is not None:
         if isinstance(responses, Signal): responses = [responses]
         if len(signals) != len(responses):
             raise ValueError(
                 '!! length of responses({}) does not match that of signals({})'
                 .format(len(responses), len(signals)))
         checker.check_type(responses, Signal)
         for r in responses:
             if r.fs != fs:
                 raise ValueError(
                     '!! All responses must have the same sampling frequency with signals'
                 )
         signal_dict[pedia.responses] = responses
     # Return signal dict and fs
     return signal_dict, fs
Example #3
    def __init__(self,
                 output_dim,
                 memory_units=None,
                 mem_config=None,
                 use_mem_wisely=False,
                 weight_regularizer=None,
                 **kwargs):
        # Call parent's constructor
        RNet.__init__(self, self.net_name)

        # Attributes
        self.output_dim = output_dim
        self.memory_units = (self.MemoryUnit.parse_units(mem_config)
                             if memory_units is None else memory_units)
        self.memory_units = [mu for mu in self.memory_units if mu.size > 0]
        checker.check_type(self.memory_units, Ham.MemoryUnit)

        self._state_size = sum([mu.size for mu in self.memory_units])
        self._activation = activations.get('tanh', **kwargs)

        self._use_mem_wisely = use_mem_wisely
        self._truncate = kwargs.get('truncate', False)
        self._weight_regularizer = regularizers.get(weight_regularizer,
                                                    **kwargs)
        # self._use_global_reg = kwargs.get('global_reg', False)
        self._kwargs = kwargs
Example #4
    def __init__(self,
                 kernel_key,
                 num_neurons,
                 input_,
                 suffix,
                 weight_initializer='glorot_normal',
                 prune_frac=0,
                 LN=False,
                 gain_initializer='ones',
                 etch=None,
                 weight_dropout=0.0,
                 **kwargs):

        # Call parent's initializer
        super().__init__(kernel_key,
                         num_neurons,
                         weight_initializer,
                         prune_frac,
                         etch=etch,
                         weight_dropout=weight_dropout,
                         **kwargs)

        self.input_ = checker.check_type(input_, tf.Tensor)
        self.suffix = checker.check_type(suffix, str)
        self.LN = checker.check_type(LN, bool)
        self.gain_initializer = initializers.get(gain_initializer)
Example #5
File: gdu_h.py  Project: winkywow/tframe
    def __init__(self,
                 configs,
                 activation='tanh',
                 weight_initializer='xavier_normal',
                 use_bias=True,
                 bias_initializer='zeros',
                 reverse=False,
                 use_reset_gate=False,
                 dropout=0.0,
                 layer_normalization=False,
                 **kwargs):
        """
    :param configs: a list or tuple of tuples with format (size, num, delta)
                    or a string with format `S1xM1xD1+S2xM2xD2+...`
    """
        # Call parent's constructor
        CellBase.__init__(self, activation, weight_initializer, use_bias,
                          bias_initializer, layer_normalization, **kwargs)

        # Specific attributes
        self._reverse = checker.check_type(reverse, bool)
        self._use_reset_gate = checker.check_type(use_reset_gate, bool)

        self._groups = self._get_groups(configs)
        self._state_size = self._get_total_size(self._groups)
        self._dropout_rate = checker.check_type(dropout, float)
        assert 0 <= dropout < 1
        # matrices for SOG v1
        self._D = None
        self._S = None
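The `configs` docstring above admits two equivalent encodings. A minimal, self-contained sketch (not tframe's actual parser) of how a config string maps to (size, num, delta) tuples:

    def parse_configs(spec):
        # '4x8x1+2x16x2' -> [(4, 8, 1), (2, 16, 2)]
        return [tuple(int(n) for n in group.split('x'))
                for group in spec.split('+')]

    assert parse_configs('4x8x1+2x16x2') == [(4, 8, 1), (2, 16, 2)]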
Example #6
    def __init__(self,
                 configs,
                 activation='tanh',
                 weight_initializer='xavier_normal',
                 use_bias=True,
                 bias_initializer='zeros',
                 reverse=False,
                 use_reset_gate=False,
                 reset_who='s',
                 shunt_output=False,
                 gate_output=False,
                 **kwargs):
        """
    :param configs: a list or tuple of tuples with format (size, num, delta)
                    or a string with format `S1xM1xD1+S2xM2xD2+...`
    """
        # Call parent's constructor
        CellBase.__init__(self, activation, weight_initializer, use_bias,
                          bias_initializer, **kwargs)

        # Specific attributes
        self._reverse = checker.check_type(reverse, bool)
        self._use_reset_gate = checker.check_type(use_reset_gate, bool)
        self._shunt_output = checker.check_type(shunt_output, bool)
        self._gate_output = checker.check_type(gate_output, bool)

        self._groups = self._get_groups(configs)
        self._state_size = self._get_total_size(self._groups)

        assert reset_who in ('a', 's')
        self._reset_who = reset_who
Example #7
File: gru.py  Project: winkywow/tframe
    def __init__(self,
                 state_size,
                 use_reset_gate=True,
                 activation='tanh',
                 weight_initializer='xavier_normal',
                 use_bias=True,
                 bias_initializer='zeros',
                 z_bias_initializer='zeros',
                 reset_who='s',
                 dropout=0.0,
                 zoneout=0.0,
                 **kwargs):
        """
    :param reset_who: in ('x', 'y')
           'x': a_h = W_h * (h_{t-1} \odot r_t)
           'y': a_h = r_t \odot (W_h * h_{t-1})
           \hat{h}_t = \varphi(Wx*x + a_h + b)
           in which r_t is the reset gate at time step t,
           \odot is the Hadamard product, W_h is the hidden-to-hidden matrix
    """
        # Call parent's constructor
        CellBase.__init__(self, activation, weight_initializer, use_bias,
                          bias_initializer, **kwargs)

        # Specific attributes
        self._state_size = checker.check_positive_integer(state_size)
        self._use_reset_gate = checker.check_type(use_reset_gate, bool)
        self._z_bias_initializer = initializers.get(z_bias_initializer)

        self._dropout_rate = checker.check_type(dropout, float)
        self._zoneout_rate = checker.check_type(zoneout, float)

        assert reset_who in ('s', 'a')
        self._reset_who = reset_who
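A small numpy sketch (hypothetical shapes, not tframe code) contrasting the two reset_who options from the docstring above:

    import numpy as np

    rng = np.random.default_rng(0)
    h_prev = rng.standard_normal(8)    # previous state h_{t-1}
    r = rng.uniform(size=8)            # reset gate output r_t
    W_h = rng.standard_normal((8, 8))  # hidden-to-hidden matrix
    a_s = W_h @ (h_prev * r)           # reset_who == 's': gate the state first
    a_a = r * (W_h @ h_prev)           # reset_who == 'a': gate the pre-activation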
Example #8
  def __init__(
      self,
      state_size,
      activation='tanh',
      use_bias=True,
      weight_initializer='xavier_uniform',
      bias_initializer='zeros',
      input_gate=True,
      output_gate=True,
      forget_gate=True,
      with_peepholes=False,
      **kwargs):
    """
    :param state_size: positive int
    :param activation: string or callable
    :param use_bias: whether to use bias
    :param weight_initializer: weight initializer identifier
    :param bias_initializer: bias initializer identifier
    """
    # Call parent's constructor
    RNet.__init__(self, BasicLSTMCell.net_name)

    # Attributes
    self._state_size = state_size
    self._activation = activations.get(activation, **kwargs)
    self._use_bias = checker.check_type(use_bias, bool)
    self._weight_initializer = initializers.get(weight_initializer)
    self._bias_initializer = initializers.get(bias_initializer)

    self._input_gate = checker.check_type(input_gate, bool)
    self._output_gate = checker.check_type(output_gate, bool)
    self._forget_gate = checker.check_type(forget_gate, bool)
    self._with_peepholes = checker.check_type(with_peepholes, bool)

    self._output_scale = state_size
Example #9
 def _gen_rnn_batches(self, x, y, batch_size, num_steps):
     checker.check_positive_integer(batch_size, 'batch size')
     checker.check_type(num_steps, int)
     # Get batch partitions
     data_x, L = self._get_batch_partition(x, batch_size)
     if y is not None:
         if len(x) == len(y):
             data_y, Ly = self._get_batch_partition(y, batch_size)
             assert L == Ly
         else:
             assert len(y) == 1
             data_y = y
     # Chop data further
     if num_steps < 0: num_steps = L
     round_len = int(np.ceil(L / num_steps))
     for i in range(round_len):
         batch_x = data_x[:, i * num_steps:min((i + 1) * num_steps, L)]
         batch_y = None
         if y is not None:
             if len(x) == len(y):
                 batch_y = data_y[:,
                                  i * num_steps:min((i + 1) * num_steps, L)]
             else:
                 assert isinstance(y, np.ndarray)
                 batch_y = np.tile(y,
                                   [batch_x.shape[0], batch_x.shape[1], 1])
         batch = DataSet(batch_x, batch_y, in_rnn_format=True)
         # State should be reset at the beginning of a sequence
         if i == 0: batch.should_reset_state = True
         batch.name = self.name + '_{}'.format(i + 1)
         yield batch
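A quick check of the chopping arithmetic above: a partition of length L = 100 with num_steps = 30 yields ceil(100 / 30) = 4 batches, the last one truncated:

    import numpy as np

    L, num_steps = 100, 30
    round_len = int(np.ceil(L / num_steps))
    bounds = [(i * num_steps, min((i + 1) * num_steps, L))
              for i in range(round_len)]
    assert bounds == [(0, 30), (30, 60), (60, 90), (90, 100)]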
Example #10
 def get_data_batches(self,
                      data_set,
                      batch_size,
                      num_steps=None,
                      shuffle=False):
     """ Get batch generator.
 :param data_set: an instance of DataSet or BigData from which data batches
                   will be extracted
 :param batch_size: if is None, default value will be assigned according to
                     the input type of this model
 :param num_steps: step number for RNN data batches
 :param shuffle: whether to shuffle
 :return: a generator or a list
 """
     # Data set must be an instance of DataSet or BigData
     assert isinstance(data_set, (DataSet, BigData))
     if self.input_type is InputTypes.BATCH:
         # If model's input type is normal batch, num_steps will be ignored
         # If batch size is not specified and data is a DataSet, feed it all at
         #  once into model
         if batch_size is None and isinstance(data_set, DataSet):
             return [data_set.stack]
         checker.check_positive_integer(batch_size)
         data_batches = data_set.gen_batches(batch_size, shuffle=shuffle)
     elif self.input_type is InputTypes.RNN_BATCH:
         if batch_size is None: batch_size = 1
         if num_steps is None: num_steps = -1
         checker.check_positive_integer(batch_size)
         checker.check_type(num_steps, int)
         data_batches = data_set.gen_rnn_batches(batch_size, num_steps,
                                                 shuffle)
     else:
         raise ValueError('!! Can not resolve input type of this model')
     return data_batches
Example #11
    def get_round_length(self, batch_size, num_steps=None):
        """Get round length for training
    :param batch_size: Batch size. For irregular sequences, this value should
                        be set to 1.
    :param num_steps: Step number. If provided, round length will be calculated
                       for RNN model
    :return: Round length for training
    """
        # Make sure features exist
        self._check_feature()
        checker.check_positive_integer(batch_size, 'batch_size')
        if num_steps is None:
            # :: For feed-forward models
            return int(np.ceil(self.stack.size / batch_size))
        else:
            # :: For recurrent models
            checker.check_type(num_steps, int)
            if self.is_regular_array: arrays = [self.features]
            elif self.parallel_on:
                return self._get_pe_round_length(batch_size, num_steps)
            else:
                arrays = self.features

            len_f = lambda x: x if self.len_f is None else self.len_f(x)
            if num_steps < 0: return len(arrays)
            else:
                return int(
                    sum([
                        np.ceil(len_f(len(array)) // batch_size / num_steps)
                        for array in arrays
                    ]))
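For the feed-forward branch, the round length is simply ceil(size / batch_size). For instance:

    import numpy as np

    stack_size, batch_size = 1000, 32
    # 31 full batches plus one partial batch of 8 samples
    assert int(np.ceil(stack_size / batch_size)) == 32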
Example #12
File: fsrnn.py  Project: winkywow/tframe
    def __init__(self,
                 fast_size,
                 fast_layers,
                 slow_size,
                 hyper_kernel,
                 activation='tanh',
                 weight_initializer='xavier_normal',
                 use_bias=True,
                 bias_initializer='zeros',
                 input_dropout=0.0,
                 output_dropout=0.0,
                 forget_bias=0,
                 **kwargs):
        # Call parent's constructor
        CellBase.__init__(self, activation, weight_initializer, use_bias,
                          bias_initializer, **kwargs)

        self.kernel_key = checker.check_type(hyper_kernel, str)
        # Specific attributes
        self._fast_size = checker.check_positive_integer(fast_size)
        self._fast_layers = checker.check_positive_integer(fast_layers)
        self._slow_size = checker.check_positive_integer(slow_size)
        self._hyper_kernel = self._get_hyper_kernel(hyper_kernel,
                                                    do=th.rec_dropout,
                                                    forget_bias=forget_bias)

        self._input_do = checker.check_type(input_dropout, float)
        self._output_do = checker.check_type(output_dropout, float)
Example #13
    def __init__(self,
                 configs,
                 factoring_dim=None,
                 psi_config=None,
                 activation='tanh',
                 weight_initializer='xavier_normal',
                 use_bias=True,
                 bias_initializer='zeros',
                 reverse=False,
                 **kwargs):
        """
    :param psi_config: e.g. 's:xs+g;xs', 's:x'
    """

        # Call parent's constructor
        CellBase.__init__(self, activation, weight_initializer, use_bias,
                          bias_initializer, **kwargs)

        # Specific attributes
        self._reverse = checker.check_type(reverse, bool)
        self._groups = self._get_groups(configs)
        self._state_size = self._get_total_size(self._groups)

        if factoring_dim is None: factoring_dim = self._state_size
        self._fd = checker.check_positive_integer(factoring_dim)

        if not psi_config: psi_config = 's:x'
        self._psi_string = checker.check_type(psi_config, str)
        self._psi_config = self._parse_psi_string()
Example #14
File: lstms.py  Project: winkywow/tframe
  def __init__(
      self,
      state_size,
      activation='tanh',
      weight_initializer='xavier_normal',
      use_bias=True,
      couple_fi=False,
      cell_bias_initializer='zeros',
      input_bias_initializer='zeros',
      output_bias_initializer='zeros',
      forget_bias_initializer='zeros',
      use_output_activation=True,
      **kwargs):
    # Call parent's constructor
    CellBase.__init__(self, activation, weight_initializer,
                      use_bias, cell_bias_initializer, **kwargs)

    # Specific attributes
    self._state_size = checker.check_positive_integer(state_size)
    self._input_bias_initializer = initializers.get(input_bias_initializer)
    self._output_bias_initializer = initializers.get(output_bias_initializer)
    self._forget_bias_initializer = initializers.get(forget_bias_initializer)

    self._couple_fi = checker.check_type(couple_fi, bool)
    self._use_output_activation = checker.check_type(
      use_output_activation, bool)
Example #15
def load_data(path, memory_depth=1, validate_size=5000, test_size=88000):
    data_sets = WHBM.load(path,
                          validate_size=validate_size,
                          test_size=test_size,
                          memory_depth=memory_depth,
                          skip_head=True)
    checker.check_type(data_sets, SignalSet)
    return data_sets
Example #16
    def __call__(self, *input_list):
        """Link neuron array to graph
    :param input_list: inputs to be fully connected
    :return: neuron outputs
    """
        input_list = [x for x in input_list if x is not None]
        # Add inputs
        if input_list:
            checker.check_type(input_list, tf.Tensor)
            # Concatenation is forbidden when LN is on
            if all([
                    len(input_list) > 1, self._layer_normalization,
                    self._normalize_each_psi
            ]):
                for x in input_list:
                    self.add_kernel(x)
            else:
                # Concatenate to speed up calculation if necessary
                if len(input_list) > 1: x = tf.concat(input_list, axis=-1)
                else: x = input_list[0]
                # Add kernel
                self.add_kernel(x)

        # Make sure psi_kernel is not empty
        assert self.psi_kernels

        # Link
        with tf.variable_scope(self.scope):

            # Calculate summed input a, ref: Ba, etc. Layer Normalization, 2016
            a_list = [kernel() for kernel in self.psi_kernels]
            a = a_list[0] if len(a_list) == 1 else tf.add_n(
                a_list, 'summed_inputs')

            # Do layer normalization here if necessary
            if self._layer_normalization:
                if not self._normalize_each_psi:
                    a = PsiKernel.layer_normalization(a,
                                                      self._gain_initializer,
                                                      False)
                # If LN is on, use_bias option must be True
                self._use_bias = True

            # Add bias if necessary
            if self._use_bias:
                if self.bias_kernel is None: self.register_bias_kernel()
                bias = self.bias_kernel()
                # Some kernels may generate bias of shape [batch_size, num_neurons]
                if len(bias.shape) == 1:
                    a = tf.nn.bias_add(a, bias)
                else:
                    a = a + bias

            # Activate if necessary
            if self._activation: a = self._activation(a)

        return a
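The summed input `a` above follows Ba et al., Layer Normalization, 2016. A one-line numpy sketch of the normalization itself (gain and bias omitted; illustrative, not the PsiKernel implementation):

    import numpy as np

    a = np.random.randn(4, 8)
    a_ln = (a - a.mean(-1, keepdims=True)) / a.std(-1, keepdims=True)
    assert np.allclose(a_ln.mean(-1), 0) and np.allclose(a_ln.std(-1), 1)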
Example #17
  def _check_data(self):
    """data_dict should be a non-empty dictionary containing equilength lists of
       regular numpy arrays. Samples in the same sequence list must have the
       same shape
       summ_dict should be a dictionary which stores summaries of each sequence.
   """
    # Check data_dict and summ_dict
    if not isinstance(self.data_dict, dict) or len(self.data_dict) == 0:
      raise TypeError('!! data_dict must be a non-empty dictionary')
    if not isinstance(self.summ_dict, dict):
      raise TypeError('!! summ_dict must be a dictionary')

    list_length = len(list(self.data_dict.values())[0])

    # Check each item in data_dict
    for name, seq_list in self.data_dict.items():
      checker.check_type(seq_list, np.ndarray)
      # Check type and length
      if not isinstance(seq_list, list) or len(seq_list) != list_length:
        raise ValueError('!! {} should be a list with length {}'.format(
          name, list_length))
      # Check structure
      if [len(s) for s in seq_list] != self.structure:
        raise ValueError(
          '!! structure of sequence list {} does not conform'.format(name))
      # Make sure len(sample_shape) > 0
      if len(seq_list[0].shape) < 2:
        seq_list = [s.reshape(-1, 1) for s in seq_list]
      # Check sample shape
      shapes = [s.shape[1:] for s in seq_list]
      if shapes.count(shapes[0]) != len(shapes):
        raise AssertionError(
          '!! Sample shapes in {} are inconsistent'.format(name))

      self.data_dict[name] = seq_list

    # Check each item in summ_dict
    for name, summ_list in self.summ_dict.items():
      # Check type and length
      if not isinstance(summ_list, list) or len(summ_list) != list_length:
        raise ValueError('!! {} should be a list of length {}'.format(
          name, list_length))

      if checker.check_scalar_list(summ_list): continue

      checker.check_type(summ_list, np.ndarray)
      # Check structure
      for i, summ in enumerate(summ_list):
        if summ.shape[0] > 1: summ_list[i] = np.reshape(summ, (1, *summ.shape))

      # Check sample shape
      shapes = [s.shape[1:] for s in summ_list]
      if shapes.count(shapes[0]) != len(shapes):
        raise AssertionError(
          '!! Sample shapes in {} are inconsistent'.format(name))
Example #18
  def __init__(
      self,
      temporal_configs,
      output_size=None,
      spatial_configs=None,
      temporal_reverse=False,
      spatial_reverse=False,
      temporal_activation='tanh',
      spatial_activation='tanh',
      weight_initializer='xavier_normal',
      use_bias=True,
      bias_initializer='zeros',
      **kwargs):
    """
    :param output_size:
    Denote y as cell output, s as state
    (1) output_size is 0 or None
        y = new_s
    (2) output_size is a positive integer
        y = neuron(x, prev_s, ...)
    """
    # Call parent's constructor
    CellBase.__init__(self, temporal_activation, weight_initializer,
                      use_bias, bias_initializer, **kwargs)
    self._temporal_activation = self._activation
    self._spatial_activation = spatial_activation

    # Specific attributes
    self._temporal_groups = self._get_groups(temporal_configs)
    self._state_size = self._get_total_size(self._temporal_groups)

    # Set spatial groups
    self._output_size = None if output_size == 0 else output_size
    self._spatial_groups = []
    if spatial_configs is not None:
      output_dim = (self._output_size if self._output_size is not None
                    else self._state_size)
      # Set spatial_groups
      if spatial_configs == 'default':
        # Check output size
        num_groups = output_dim // 2
        assert num_groups * 2 == output_dim
        self._spatial_groups = [(2, num_groups, 1)]
      else:
        assert isinstance(spatial_configs, str) and len(spatial_configs) > 0
        self._spatial_groups = self._get_groups(spatial_configs)
        total_size = self._get_total_size(self._spatial_groups)
        # Check output dim
        assert output_dim == total_size

    self._reverse_t = checker.check_type(temporal_reverse, bool)
    self._reverse_s = checker.check_type(spatial_reverse, bool)
Example #19
 def _get_periods(self, periods, **kwargs):
     # Get max groups
     max_groups = kwargs.get('max_groups', 7)
     if periods is None:
         periods = []
         i = 0
         for _ in range(self._state_size):
             periods.append(2**i)
             i += 1
             if i >= max_groups: i = 0
     else: checker.check_type(periods, int)
     assert len(periods) == self._state_size
     return sorted(periods)
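A self-contained replica of the default-period logic above, showing the pattern for state_size = 10 with the default max_groups = 7:

    def default_periods(state_size, max_groups=7):
        periods, i = [], 0
        for _ in range(state_size):
            periods.append(2 ** i)
            i += 1
            if i >= max_groups: i = 0
        return sorted(periods)

    # Powers of two cycle after seven groups, then the result is sorted
    assert default_periods(10) == [1, 1, 2, 2, 4, 4, 8, 16, 32, 64]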
Example #20
def _brutal_chop_len_f(self, bs, ns, sz):
    # bs: batch size; ns: num_steps; sz: chop size
    assert isinstance(self, BigData)
    round_len = 0
    assert ns is not None
    for len_list in self.structure:
        checker.check_type(len_list, int)
        # For RNN models
        if ns < 0: round_len += len(len_list)
        else:
            round_len += int(
                sum([np.ceil(size // sz // bs / ns) for size in len_list]))
    # Return round length
    return round_len
Example #21
 def _get_file_name(cls, train_size, test_size, unique_, cheat,
                    local_binary, multiple, rule):
     checker.check_positive_integer(train_size)
     checker.check_positive_integer(test_size)
     checker.check_positive_integer(multiple)
     checker.check_type(unique_, bool)
     if rule is not None: tail = rule
     elif unique_: tail = 'U'
     else: tail = 'NU'
     file_name = '{}{}_{}+{}_{}_{}_{}.tfds'.format(
         cls.DATA_NAME, '' if multiple == 1 else '(x{})'.format(multiple),
         train_size, test_size, tail, 'C' if cheat else 'NC',
         'LB' if local_binary else 'P')
     if multiple > 1: file_name = 'm' + file_name
     return file_name
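Rendering the naming scheme directly, with unique_=True, cheat=False, local_binary=True, multiple=1, and a hypothetical DATA_NAME of 'ERG':

    name = '{}{}_{}+{}_{}_{}_{}.tfds'.format(
        'ERG', '', 10000, 2000, 'U', 'NC', 'LB')
    assert name == 'ERG_10000+2000_U_NC_LB.tfds'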
Example #22
  def _pad_sequences(sequences, max_steps):
    """Receive a list of irregular sequences and output a regular numpy array"""
    assert isinstance(sequences, list)
    checker.check_positive_integer(max_steps)
    checker.check_type(sequences, np.ndarray)

    if all([s.shape[0] == sequences[0].shape[0] for s in sequences]):
      return np.stack(sequences, axis=0)

    sample_shape = sequences[0].shape[1:]
    assert len(sample_shape) > 0
    stack = np.zeros(shape=(len(sequences), max_steps, *sample_shape))
    for i, s in enumerate(sequences): stack[i, :len(s)] = s

    return stack
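For instance, padding sequences of shapes (3, 2) and (5, 2) with max_steps = 5 produces a regular (2, 5, 2) array whose unused tail stays zero; the core loop above reduces to:

    import numpy as np

    seqs = [np.ones((3, 2)), np.ones((5, 2))]
    stack = np.zeros(shape=(len(seqs), 5, *seqs[0].shape[1:]))
    for i, s in enumerate(seqs): stack[i, :len(s)] = s
    assert stack.shape == (2, 5, 2)
    assert (stack[0, 3:] == 0).all()  # tail of the shorter sequence is zero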
Example #23
    def _check_data(self):
        """Features and data_dict should not be empty at the same time.
       All data array or list provided must have the same length.
       If features (or targets) are provided as a list (or a tuple),
       its elements must be numpy arrays with exactly the same shape (except
       for the first dimension)."""
        # Make sure data_dict is a dictionary
        if not isinstance(self.data_dict, dict):
            raise TypeError('!! data_dict provided must be a dict')
        # Put all data arrays to a single dict for later check
        data_dict = self.data_dict.copy()
        if self.features is not None:
            data_dict[pedia.features] = self.features
            if self.targets is not None:
                # TODO
                # if type(self.features) != type(self.targets):
                #   raise TypeError('!! features and targets must be of the same type')
                data_dict[pedia.targets] = self.targets

        # Make sure at least one data array is provided
        if len(data_dict) == 0:
            raise AssertionError('!! data not found')
        # Make sure all data array have the same size
        size = -1
        for key, val in data_dict.items():
            # Make sure all data arrays are instances of list, ndarray, etc.
            if not hasattr(val, '__len__'):
                raise AttributeError(
                    '!! {} data must have __len__ attribute'.format(key))
            if size == -1: size = len(val)
            elif size != len(val):
                raise ValueError('!! all data arrays must have the same size')

            # Make sure features and targets are (lists of) numpy arrays
            if key in (pedia.features, pedia.targets):
                checker.check_type(val, np.ndarray)
                # If features and targets are stored in a list (or a tuple), check
                # .. the shape of each numpy array
                if not isinstance(val, np.ndarray):
                    assert isinstance(val, (list, tuple))
                    shape = None
                    for array in val:
                        assert isinstance(array, np.ndarray)
                        if shape is None: shape = array.shape[1:]
                        elif shape != array.shape[1:]:
                            raise ValueError(
                                '!! samples in {} list should have the same shape'
                                .format(key))
Example #24
  def __init__(
      self,
      state_size,
      activation='tanh',
      use_bias=True,
      weight_initializer='xavier_normal',
      bias_initializer='zeros',
      **kwargs):
    """
    :param state_size: positive int
    :param activation: string or callable
    :param use_bias: whether to use bias
    :param weight_initializer: weight initializer identifier
    :param bias_initializer: bias initializer identifier
    """
    # Call parent's constructor
    RNet.__init__(self, self.net_name)

    # Attributes
    self._state_size = state_size
    self._activation = activations.get(activation, **kwargs)
    self._use_bias = checker.check_type(use_bias, bool)
    self._weight_initializer = initializers.get(weight_initializer)
    self._bias_initializer = initializers.get(bias_initializer)
    self._output_scale = state_size
Example #25
    def __init__(self,
                 state_size,
                 periods=None,
                 activation='tanh',
                 use_bias=True,
                 weight_initializer='xavier_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """
    :param state_size: State size
    :param periods: a list of integers. If not provided, periods will be set
                    to a default exponential series {2^{i-1}}_{i=0}^{state_size}
    """
        # Call parent's constructor
        RNet.__init__(self, ClockworkRNN.net_name)

        # Attributes
        self._state_size = checker.check_positive_integer(state_size)
        self._periods = self._get_periods(periods, **kwargs)
        self._activation = activations.get(activation, **kwargs)
        self._use_bias = checker.check_type(use_bias, bool)
        self._weight_initializer = initializers.get(weight_initializer)
        self._bias_initializer = initializers.get(bias_initializer)

        # modules = [(start_index, size, period)+]
        self._modules = []
        self._init_modules(**kwargs)
Example #26
    def __init__(self,
                 num_neurons,
                 group_size,
                 head_size=-1,
                 activation=None,
                 use_bias=True,
                 weight_initializer='xavier_normal',
                 bias_initializer='zeros',
                 **kwargs):
        """
    Softmax over groups applied to neurons.
    Case 1: head_size < 0: does not use extra neurons
    Case 2: head_size = 0: use extra neurons without a head
    Case 3: head_size > 0: use extra neurons with a head
    """
        # Call parent's constructor
        super().__init__(activation, weight_initializer, use_bias,
                         bias_initializer, **kwargs)

        # Specific attributes
        self._num_neurons = checker.check_positive_integer(num_neurons)
        self._group_size = checker.check_positive_integer(group_size)
        self._head_size = checker.check_type(head_size, int)

        # Developer options
        options = th.developer_options
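A numpy sketch of "softmax over groups" (illustrative only, not this layer's kernel): with num_neurons = 6 and group_size = 3, softmax is applied independently within each of the two groups:

    import numpy as np

    z = np.arange(6, dtype=float).reshape(2, 3)  # 2 groups of size 3
    p = np.exp(z) / np.exp(z).sum(axis=-1, keepdims=True)
    assert np.allclose(p.sum(axis=-1), 1.0)      # each group sums to 1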
Example #27
  def __init__(
      self,
      num_filter_list,
      num_classes,
      kernel_initializer='glorot_uniform',
      left_repeats=2,
      right_repeats=2,
      activation='relu',
      dropout_rate=0.5,
      name='unet',
      level=1,
      **kwargs):

    # Sanity check
    assert isinstance(num_filter_list, (tuple, list)) and num_filter_list
    # Call parent's constructor
    # TODO: the level logic is not elegant
    super().__init__(name, level=level, **kwargs)
    # Specific attributes
    self.num_filter_list = num_filter_list
    self.num_classes = checker.check_positive_integer(num_classes)
    # self.kernel_sizes = kernel_sizes
    self.kernel_initializer = kernel_initializer
    self.activation = activation
    self.dropout_rate = checker.check_type(dropout_rate, float)
    self.left_repeats = checker.check_positive_integer(left_repeats)
    self.right_repeats = checker.check_positive_integer(right_repeats)
    # Add layers
    self._add_layers()
Example #28
    def __init__(self,
                 output_dim=None,
                 spatial_configs=None,
                 reverse=False,
                 activation='relu',
                 use_bias=True,
                 weight_initializer='xavier_normal',
                 bias_initializer='zeros',
                 **kwargs):

        assert isinstance(activation, str)
        self.activation_string = activation
        # Call parent's constructor
        LayerWithNeurons.__init__(self, activation, weight_initializer,
                                  use_bias, bias_initializer, **kwargs)

        assert not (output_dim is None and spatial_configs is None)
        self._spatial_groups = []
        if spatial_configs is not None:
            self._spatial_groups = self._get_groups(spatial_configs)
            total_size = self._get_total_size(self._spatial_groups)
            if output_dim is None: output_dim = total_size
            assert output_dim == total_size
        self._output_dim = checker.check_positive_integer(output_dim)
        self._reverse = checker.check_type(reverse, bool)

        self.neuron_scale = [output_dim]
Example #29
def saturate_loss(tensor, mu=0.5, encourage_saturation=True):
    # TODO: this method is still being developed. DO NOT USE WITHOUT GUIDE
    # Each entry in tensor should be in range [0, 1], which should be guaranteed
    # ... by users
    checker.check_type(tensor, tf.Tensor)
    assert 0 < mu < 1
    # Encourage saturation
    if encourage_saturation:
        # Calculate distance to saturation
        left = tensor[tf.less(tensor, mu)]
        right = tensor[tf.greater(tensor, mu)]
        return tf.norm(left) - tf.norm(right)
    else:
        # Calculate distance to unsaturation
        degree = tf.abs(tensor - mu)
    # Calculate loss using reduce mean
    return tf.reduce_mean(degree)
Example #30
File: whbm.py  Project: winkywow/tframe
  def evaluate(f, data_set, plot=False):
    if not callable(f): raise AssertionError('!! Input f must be callable')
    checker.check_type(data_set, SignalSet)
    assert isinstance(data_set, SignalSet)
    if data_set.targets is None:
      raise ValueError('!! Responses not found in SignalSet')
    u, y = data_set.features, np.ravel(data_set.targets)
    assert isinstance(y, Signal)
    # Show status
    console.show_status('Evaluating {} ...'.format(data_set.name))
    # In evaluation, each metric is accumulated starting from t = 1000 instead
    #  of t = 0 to eliminate the influence of transient errors at the beginning
    #  of the simulation
    start_at = 1000
    model_output = Signal(f(u), fs=y.fs)
    delta = y - model_output
    err = delta[start_at:]
    assert isinstance(err, Signal)
    ratio = lambda val: 100.0 * val / y.rms

    # The mean value of the simulation error in time domain
    val = err.average
    console.supplement('E[err] = {:.4f}mV ({:.3f}%)'.format(
      val * 1000, ratio(val)))
    # The standard deviation of the error in time domain
    val = float(np.std(err))
    console.supplement('STD[err] = {:.4f}mV ({:.3f}%)'.format(
      val * 1000, ratio(val)))
    # The root mean square value of the error in time domain
    val = err.rms
    console.supplement('RMS[err] = {:.4f}mV ({:.3f}%)'.format(
      val * 1000, ratio(val)))

    # Plot
    if not plot: return
    from tframe.data.sequences.signals.figure import Figure, Subplot
    fig = Figure('Simulation Error')
    # Add ground truth
    prefix = 'System Output, $||y|| = {:.4f}$'.format(y.norm)
    fig.add(Subplot.PowerSpectrum(y, prefix=prefix))
    # Add model output
    prefix = 'Model Output, RMS($\Delta$) = ${:.4f}mV$'.format(1000 * err.rms)
    fig.add(Subplot.PowerSpectrum(model_output, prefix=prefix, Error=delta))
    # Plot
    fig.plot()