Example #1
File: common.py  Project: zkmartin/tsframe
    def __init__(self,
                 output_dim,
                 force_real=False,
                 use_bias=True,
                 weight_initializer='xavier_uniform',
                 bias_initializer='zeros',
                 weight_regularizer=None,
                 bias_regularizer=None,
                 **kwargs):
        # Sanity check: output_dim must be a scalar
        if not np.isscalar(output_dim):
            raise TypeError('!! output_dim must be a scalar, not {}'.format(
                type(output_dim)))

        self._output_dim = output_dim
        self._force_real = force_real
        self._use_bias = use_bias

        # Resolve identifiers (str, callable or None) into concrete objects
        self._weight_initializer = initializers.get(weight_initializer)
        self._bias_initializer = initializers.get(bias_initializer)
        self._weight_regularizer = regularizers.get(weight_regularizer,
                                                    **kwargs)
        self._bias_regularizer = regularizers.get(bias_regularizer, **kwargs)

        # Variables are created later, when the layer is linked into a graph
        self.weights = None
        self.biases = None

        self.neuron_scale = [output_dim]
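
The constructor above funnels string/None/callable identifiers through get-style resolvers before storing them. The sketch below illustrates that resolution contract (None and callables pass through, strings are looked up in a registry). It is an assumption modeled on the Keras convention, not tframe's actual regularizers.get implementation, and the registry contents are hypothetical.

import tensorflow as tf

def get(identifier, **kwargs):
    """Resolve an identifier to a regularizer callable, or None (sketch)."""
    if identifier is None:
        return None                    # no regularization requested
    if callable(identifier):
        return identifier              # already a callable, pass it through
    if isinstance(identifier, str):
        # Hypothetical registry; the real names and strengths live in tframe
        registry = {'l2': lambda t: 0.01 * tf.reduce_sum(tf.square(t))}
        return registry[identifier.lower()]
    raise TypeError('Cannot resolve regularizer: {}'.format(identifier))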
Example #2
File: ark.py  Project: garthtrickett/tframe
    def __init__(self,
                 output_dim,
                 memory_units=None,
                 mem_config=None,
                 use_mem_wisely=False,
                 weight_regularizer=None,
                 **kwargs):
        # Call parent's constructor
        RNet.__init__(self, self.net_name)

        # Attributes
        self.output_dim = output_dim
        # Parse memory units from mem_config when none are given explicitly,
        # then drop empty units and type-check the rest
        self.memory_units = (self.MemoryUnit.parse_units(mem_config)
                             if memory_units is None else memory_units)
        self.memory_units = [mu for mu in self.memory_units if mu.size > 0]
        checker.check_type(self.memory_units, Ham.MemoryUnit)

        self._state_size = sum([mu.size for mu in self.memory_units])
        self._activation = activations.get('tanh', **kwargs)

        self._use_mem_wisely = use_mem_wisely
        self._truncate = kwargs.get('truncate', False)
        self._weight_regularizer = regularizers.get(weight_regularizer,
                                                    **kwargs)
        # self._use_global_reg = kwargs.get('global_reg', False)
        self._kwargs = kwargs
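
This constructor derives its recurrent state size from a list of memory units, each exposing a size attribute, optionally parsed from a config by MemoryUnit.parse_units. The stand-in below reproduces just that surface, inferred solely from what the snippet touches; the real Ham.MemoryUnit API and its config format may well differ.

from collections import namedtuple

class MemoryUnit(namedtuple('MemoryUnit', 'size')):
    """Hypothetical stand-in for Ham.MemoryUnit (illustration only)."""
    @classmethod
    def parse_units(cls, config):
        # Assumed config format: unit sizes joined by '-', e.g. '3-0-5'
        return [cls(int(s)) for s in str(config).split('-')]

units = [mu for mu in MemoryUnit.parse_units('3-0-5') if mu.size > 0]
state_size = sum(mu.size for mu in units)   # 3 + 5 = 8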
Example #3
    def get_global_regularizer(self):
        if not self.use_global_regularizer: return None
        from tframe import regularizers
        return regularizers.get(self.regularizer)
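
Whatever get returns here is either None or a callable mapping a tensor to a scalar penalty, which is exactly how Example #4 below consumes it. A minimal consumption sketch, assuming a TF1 graph context and that 'l2' is a registered identifier in tframe's registry (check the library's regularizers module):

import tensorflow as tf
from tframe import regularizers

W = tf.get_variable('W', shape=[4, 4], initializer=tf.zeros_initializer())

reg = regularizers.get('l2')   # 'l2' is an assumed registry name
if callable(reg):
    penalty = reg(W)           # scalar loss term added to the total loss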
Example #4
def neurons(num,
            external_input,
            activation=None,
            memory=None,
            fc_memory=True,
            scope=None,
            use_bias=True,
            truncate=False,
            num_or_size_splits=None,
            weight_initializer='glorot_uniform',
            bias_initializer='zeros',
            weight_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            **kwargs):
    """Analogous to tf.keras.layers.Dense"""
    # Get activation, initializers and regularizers
    if activation is not None: activation = activations.get(activation)
    weight_initializer = initializers.get(weight_initializer)
    bias_initializer = initializers.get(bias_initializer)
    weight_regularizer = regularizers.get(weight_regularizer)
    bias_regularizer = regularizers.get(bias_regularizer)
    activity_regularizer = regularizers.get(activity_regularizer)

    # a. Check prune configs
    if 'prune_frac' in kwargs:
        x_prune_frac, s_prune_frac = (kwargs['prune_frac'],) * 2
    else:
        x_prune_frac = kwargs.get('x_prune_frac', 0)
        s_prune_frac = kwargs.get('s_prune_frac', 0)
    prune_is_on = hub.pruning_rate_fc > 0.0 and x_prune_frac + s_prune_frac > 0

    # b. Check sparse configs
    x_heads = kwargs.get('x_heads', 0)
    s_heads = kwargs.get('s_heads', 0)
    sparse_is_on = x_heads + s_heads > 0

    # :: Decide whether to concatenate, considering a and b
    # .. a
    if memory is None: should_concate = False
    elif prune_is_on: should_concate = x_prune_frac == s_prune_frac
    else: should_concate = fc_memory
    # .. b
    should_concate = should_concate and not sparse_is_on
    #
    separate_memory_neurons = memory is not None and not should_concate

    def get_weights(name, tensor, p_frac, heads):
        shape = [get_dimension(tensor), num]
        if prune_is_on and p_frac > 0:
            assert heads == 0
            return get_weights_to_prune(name, shape, weight_initializer,
                                        p_frac)
        elif heads > 0:
            return _get_sparse_weights(shape[0],
                                       shape[1],
                                       heads,
                                       use_bit_max=True,
                                       coef_initializer=weight_initializer)
        else:
            return get_variable(name, shape, weight_initializer)

    def forward():
        # Prepare a weight list for potential regularizer calculation
        weight_list = []

        # Get x
        x = (tf.concat([external_input, memory], axis=1, name='x_concat_s')
             if should_concate else external_input)

        # - Calculate net input for x
        # .. get weights
        name = 'Wx' if separate_memory_neurons else 'W'
        Wx = get_weights(name, x, x_prune_frac, x_heads)
        weight_list.append(Wx)
        # .. register weights in context; currently only some extractors use it
        context.weights_list.append(Wx)
        # .. do matrix multiplication
        net_y = get_matmul(truncate)(x, Wx)

        # - Calculate net input for memory and add to net_y if necessary
        if separate_memory_neurons:
            if not fc_memory:
                assert not (prune_is_on and s_prune_frac > 0)
                memory_dim = get_dimension(memory)
                assert memory_dim == num
                Ws = get_variable('Ws', [1, num], weight_initializer)
                net_s = get_multiply(truncate)(memory, Ws)
            else:
                assert prune_is_on or sparse_is_on
                Ws = get_weights('Ws', memory, s_prune_frac, s_heads)
                net_s = get_matmul(truncate)(memory, Ws)

            # Append Ws to weight list and add net_s to net_y
            weight_list.append(Ws)
            net_y = tf.add(net_y, net_s)

        # - Add bias if necessary
        b = None
        if use_bias:
            b = get_bias('bias', num, bias_initializer)
            net_y = tf.nn.bias_add(net_y, b)

        # - Activate and return
        if callable(activation): net_y = activation(net_y)
        return net_y, weight_list, b

    if scope is not None:
        with tf.variable_scope(scope):
            y, W_list, b = forward()
    else:
        y, W_list, b = forward()

    # Add regularizer if necessary
    if callable(weight_regularizer):
        context.add_loss_tensor(
            tf.add_n([weight_regularizer(w) for w in W_list]))
    if callable(bias_regularizer) and b is not None:
        context.add_loss_tensor(bias_regularizer(b))
    if callable(activity_regularizer):
        context.add_loss_tensor(activity_regularizer(y))

    # Split if necessary
    if num_or_size_splits is not None:
        return tf.split(y, num_or_size_splits=num_or_size_splits, axis=1)
    return y
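
A call sketch for the helper above, assuming a TF1 graph context with default hub settings (pruning rate 0, no sparse heads); the shapes and the 'l2' regularizer name are illustrative, not prescribed by the source.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32], name='x')      # external input
s = tf.placeholder(tf.float32, [None, 64], name='state')  # previous state

# 64 units, fully-connected memory path, variables under scope 'hidden'
y = neurons(64, x,
            activation='tanh',
            memory=s,
            fc_memory=True,
            scope='hidden',
            weight_regularizer='l2')   # assumed registry name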