Code example #1
File: fc_layer.py Project: NikSQ/VarRNN
    def __init__(self, layer_idx, is_training, tau, prev_neurons=None):
        rnn_config = get_rnn_config()
        self.train_config = get_train_config()
        self.layer_config = rnn_config['layer_configs'][layer_idx]
        if prev_neurons is None:
            self.w_shape = (rnn_config['layout'][layer_idx - 1],
                            rnn_config['layout'][layer_idx])
        else:
            self.w_shape = (prev_neurons, rnn_config['layout'][layer_idx])
        self.b_shape = (1, self.w_shape[1])
        self.is_training = is_training

        # Activation summaries and specific neurons to gather individual histograms
        self.acts = dict()
        self.act_neurons = np.random.choice(
            range(self.b_shape[1]),
            size=(get_info_config()['tensorboard']['single_acts'], ),
            replace=False)
        if (self.train_config['batchnorm']['type'] == 'batch'
                and 'fc' in self.train_config['batchnorm']['modes']):
            self.bn_s_x = get_batchnormalizer()
            self.bn_b_x = get_batchnormalizer()

        with tf.variable_scope(self.layer_config['var_scope']):
            var_keys = ['w', 'b']
            self.weights = Weights(var_keys, self.layer_config, self.w_shape,
                                   self.b_shape, tau)
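The constructor reads everything from module-level config getters. A minimal sketch of the structure this code assumes (the exact keys live in the VarRNN project's config modules; the values below are hypothetical):

rnn_config = {
    'layout': [20, 64, 10],  # neurons per layer
    'layer_configs': [{'var_scope': 'layer_0'},
                      {'var_scope': 'layer_1'},
                      {'var_scope': 'layer_2'}],
}
train_config = {'batchnorm': {'type': 'batch', 'modes': ['fc'], 'tau': 5}}
info_config = {'tensorboard': {'single_acts': 3}}  # neurons to track individually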
Code example #2
    def __init__(self,
                 layer_idx,
                 is_training,
                 tau,
                 bidirectional_inp=False,
                 prev_neurons=None):
        self.rnn_config = get_rnn_config()
        self.train_config = get_train_config()
        self.layer_config = self.rnn_config['layer_configs'][layer_idx]
        if prev_neurons is None:
            if bidirectional_inp:
                self.w_shape = (self.rnn_config['layout'][layer_idx - 1] * 2 +
                                self.rnn_config['layout'][layer_idx],
                                self.rnn_config['layout'][layer_idx])
            else:
                self.w_shape = (self.rnn_config['layout'][layer_idx - 1] +
                                self.rnn_config['layout'][layer_idx],
                                self.rnn_config['layout'][layer_idx])
        else:
            self.w_shape = (prev_neurons +
                            self.rnn_config['layout'][layer_idx],
                            self.rnn_config['layout'][layer_idx])
        self.b_shape = (1, self.w_shape[1])
        self.cell_access_mat = []
        self.is_training = is_training

        # Activation summaries and specific neurons to gather individual histograms
        self.acts = dict()
        self.act_neurons = np.random.choice(
            range(self.b_shape[1]),
            size=(get_info_config()['tensorboard']['single_acts'], ),
            replace=False)

        if (self.train_config['batchnorm']['type'] == 'batch'
                and 'x' in self.train_config['batchnorm']['modes']):
            self.bn_b_x = []
            self.bn_s_x = []
        if (self.train_config['batchnorm']['type'] == 'batch'
                and 'h' in self.train_config['batchnorm']['modes']):
            self.bn_b_h = []
            self.bn_s_h = []

        with tf.variable_scope(self.layer_config['var_scope']):
            var_keys = ['wi', 'bi', 'wc', 'bc', 'wo', 'bo']
            self.weights = Weights(var_keys, self.layer_config, self.w_shape,
                                   self.b_shape, tau)
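The first weight dimension adds the layer's own size because every LSTM gate operates on the concatenation of the input with the recurrent output (see the tf.concat calls in code example #7). A quick sketch of the three shape cases, with a hypothetical layout:

layout = [20, 64, 10]  # hypothetical rnn_config['layout'], layer_idx = 1

w_shape = (layout[0] + layout[1], layout[1])          # standard: (84, 64)
w_shape_bi = (layout[0] * 2 + layout[1], layout[1])   # bidirectional input: (104, 64)
w_shape_prev = (32 + layout[1], layout[1])            # prev_neurons=32 override: (96, 64)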
Code example #3
File: test_sample.py Project: jpasquers/vocal-neural
    def setUp(self):
        self.layer_lengths = [3, 3, 3]
        self.weights = Weights(self.layer_lengths, 0.1)
        layer_0 = np.array([
            [1, 2, 1, 2],  #2
            [0, 1, -2, 1],  #0
            [2, 2, 0.5, -1]  #2
        ])
        layer_1 = np.array([
            [1, 2, 0, 0],  #5
            [0, 2, 0, 1],  #6
            [0, 2, 1, 0]  #4
        ])
        self.weights.update_layer(0, layer_0)
        self.weights.update_layer(1, layer_1)

        self.sample_x = np.array([[0.1], [0.2], [0.3]])
        self.sample_y = np.array([[2], [3], [2]])
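The inline #2/#0/#2 and #5/#6/#4 comments are the expected forward activations: each weight row carries a leading bias column, the input gets a 1 prepended, and the activation here is linear. A quick numpy check of that convention (which is what test_get_outcome in code example #5 asserts):

import numpy as np

x = np.array([[0.1], [0.2], [0.3]])
layer_0 = np.array([[1, 2, 1, 2], [0, 1, -2, 1], [2, 2, 0.5, -1]])
layer_1 = np.array([[1, 2, 0, 0], [0, 2, 0, 1], [0, 2, 1, 0]])

a1 = layer_0 @ np.vstack([[1], x])   # [[2.], [0.], [2.]] -> the #2/#0/#2 comments
a2 = layer_1 @ np.vstack([[1], a1])  # [[5.], [6.], [4.]] -> the #5/#6/#4 comments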
Code example #4
    def __init__(self,
                 layer_lengths,
                 learning_rate=.3,
                 lmbda=.1,
                 numIter=200,
                 epsilon=0.125):
        """
        layer_lengths will be an array where each value is the size of that layer,
        e.g. [3, 4, 5] would have an input layer of size 3, a hidden layer of size 4,
        and an output layer of size 5.
        Note: These do not include bias terms.
        """
        # initialize weights
        self.weights = Weights(layer_lengths, epsilon)
        self.layer_lengths = layer_lengths
        self.num_layers = len(self.layer_lengths)
        self.learning_rate = learning_rate
        # index of the last layer
        self.L = self.num_layers - 1
        self.lmbda = lmbda
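A minimal construction sketch; judging from the test fixture in code example #3, Weights stores one (n_out, n_in + 1) matrix per connection, with the extra column holding the bias:

net = NeuralNet([3, 4, 5])  # 3 inputs, hidden layer of 4, 5 outputs
assert net.num_layers == 3
assert net.L == 2           # index of the output layer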
Code example #5
File: test_sample.py Project: jpasquers/vocal-neural
class TestSample(unittest.TestCase):
    def setUp(self):
        self.layer_lengths = [3, 3, 3]
        self.weights = Weights(self.layer_lengths, 0.1)
        layer_0 = np.array([
            [1, 2, 1, 2],  #2
            [0, 1, -2, 1],  #0
            [2, 2, 0.5, -1]  #2
        ])
        layer_1 = np.array([
            [1, 2, 0, 0],  #5
            [0, 2, 0, 1],  #6
            [0, 2, 1, 0]  #4
        ])
        self.weights.update_layer(0, layer_0)
        self.weights.update_layer(1, layer_1)

        self.sample_x = np.array([[0.1], [0.2], [0.3]])
        self.sample_y = np.array([[2], [3], [2]])

    def test_get_outcome(self):
        sample = BasicSample(self.weights, self.sample_x, self.sample_y)
        result = sample.get_outcome()
        self.assertAlmostEqual(5, result[0][0])
        self.assertAlmostEqual(6, result[1][0])
        self.assertAlmostEqual(4, result[2][0])

    def test_calc_deltas(self):
        #ds[L] = [[-3][-3][-2]]
        #ds[1] = [[-16][-2][-3]]
        #ds[0] = [[-40][-13.5][-31]]
        #as[L] = [[5][6][4]]
        #as[1] = [[1][2][0][2]]
        #as[0] = [[1][0.1][0.2][0.3]]
        #deltas[1] = [[-3,-6,0,-6][-3,-6,0,-6][-2,-4,0,-4]]
        #deltas[0] = [[-16,-1.6,-3.2,-4.8][-2,-.2,-.4,-.6][-3,-.3,-.6,-.9]]

        sample = BasicSample(self.weights, self.sample_x, self.sample_y)
        deltas = sample.calc_sample_deltas()
        self.assertAlmostEqual(-3, deltas[1][0][0])
        self.assertAlmostEqual(-1.6, deltas[0][0][1])
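The commented values fix the sign convention: the output-layer error is y - a (so [2, 3, 2] - [5, 6, 4] = [-3, -3, -2]), and each delta block is the outer product of a layer's error with the bias-extended activations of the layer below. A numpy check of the two assertions, taking the intermediate error ds[1] from the comments:

import numpy as np

ds_L = np.array([[-3], [-3], [-2]])    # y - a at the output layer
as_1 = np.array([[1, 2, 0, 2]])        # bias-extended hidden activations
ds_1 = np.array([[-16], [-2], [-3]])   # propagated hidden error
as_0 = np.array([[1, 0.1, 0.2, 0.3]])  # bias-extended input

deltas_1 = ds_L @ as_1  # row 0: [-3, -6, 0, -6]        -> deltas[1][0][0] == -3
deltas_0 = ds_1 @ as_0  # row 0: [-16, -1.6, -3.2, -4.8] -> deltas[0][0][1] == -1.6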
Code example #6
class NeuralNet():
    def __init__(self,
                 layer_lengths,
                 learning_rate=.3,
                 lmbda=.1,
                 numIter=200,
                 epsilon=0.125):
        """
        layer_lengths will be an array where each value is the size of that layer
        e.x. [3,4,5] - would have an input layer of size 3, a hidden layer of size 4, 
        and an output layer of size 5.
        Note: These do not include bias terms.
        """
        #initialize weights
        self.weights = Weights(layer_lengths, epsilon)
        self.layer_lengths = layer_lengths
        self.num_layers = len(self.layer_lengths)
        self.learning_rate = learning_rate
        #index of the last layer
        self.L = self.num_layers - 1
        self.lmbda = lmbda

    def train(self, X, Y, num_iterations):
        """
        where m = X.shape[1] = Y.shape[1] = #samples
        X = self.layer_lengths[0] x m ndarray
        Y = self.layer_lengths[self.L] x m ndarray
        """
        if self.layer_lengths[0] != X.shape[0]:
            print("num features (" + str(X.shape[0]) +
                  ") does not match input layer_lengths[0] (" +
                  str(self.layer_lengths[0]) + ")")
            sys.exit()
        if self.layer_lengths[self.L] != Y.shape[0]:
            print("output layers don't match (" + str(Y.shape[0]) +
                  ") and (" + str(self.layer_lengths[self.L]) + ")")
            sys.exit()
        if X.shape[1] != Y.shape[1]:
            #TODO proper error checking
            print("unequal sample sizes for input and outputs")
            sys.exit()
        for i in range(0, num_iterations):
            print("starting iteration: " + str(i))
            self.train_iteration(X, Y)

    def train_iteration(self, X, Y):
        iteration = Iteration(self.weights, X, Y, self.lmbda)
        partials = iteration.calc_error_partials()

        #TODO some function that takes in theta and dJ/dTheta and gives the new theta
        #for now, do a simple change of dJ/dtheta * learning_rate. Eventually convert
        #to a more sophisticated gradient descent algorithm
        for i in range(0, self.L):
            next_theta = np.subtract(self.weights.get_layer(i),
                                     self.learning_rate * partials[i])
            self.weights.update_layer(i, next_theta)

    def error(self, expected, actual):
        """
        .5 * Sum(i) (expected[i] - actual[i])^2
        """
        diff = np.subtract(expected, actual)
        diff_squared = np.square(diff)
        return 0.5 * np.sum(diff_squared)
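A minimal end-to-end sketch, assuming Weights and Iteration from this project are importable and that X and Y hold one column per sample, as the train docstring describes:

import numpy as np

net = NeuralNet([3, 4, 2], learning_rate=0.3, lmbda=0.1)
X = np.random.rand(3, 10)  # 3 features x 10 samples
Y = np.random.rand(2, 10)  # 2 outputs x 10 samples
net.train(X, Y, num_iterations=5)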
Code example #7
class LSTMLayer:
    def __init__(self,
                 layer_idx,
                 is_training,
                 tau,
                 bidirectional_inp=False,
                 prev_neurons=None):
        self.rnn_config = get_rnn_config()
        self.train_config = get_train_config()
        self.layer_config = self.rnn_config['layer_configs'][layer_idx]
        if prev_neurons is None:
            if bidirectional_inp:
                self.w_shape = (self.rnn_config['layout'][layer_idx - 1] * 2 +
                                self.rnn_config['layout'][layer_idx],
                                self.rnn_config['layout'][layer_idx])
            else:
                self.w_shape = (self.rnn_config['layout'][layer_idx - 1] +
                                self.rnn_config['layout'][layer_idx],
                                self.rnn_config['layout'][layer_idx])
        else:
            self.w_shape = (prev_neurons +
                            self.rnn_config['layout'][layer_idx],
                            self.rnn_config['layout'][layer_idx])
        self.b_shape = (1, self.w_shape[1])
        self.cell_access_mat = []
        self.is_training = is_training

        # Activation summaries and specific neurons to gather individual histograms
        self.acts = dict()
        self.act_neurons = np.random.choice(
            range(self.b_shape[1]),
            size=(get_info_config()['tensorboard']['single_acts'], ),
            replace=False)

        if (self.train_config['batchnorm']['type'] == 'batch'
                and 'x' in self.train_config['batchnorm']['modes']):
            self.bn_b_x = []
            self.bn_s_x = []
        if (self.train_config['batchnorm']['type'] == 'batch'
                and 'h' in self.train_config['batchnorm']['modes']):
            self.bn_b_h = []
            self.bn_s_h = []

        with tf.variable_scope(self.layer_config['var_scope']):
            var_keys = ['wi', 'bi', 'wc', 'bc', 'wo', 'bo']
            self.weights = Weights(var_keys, self.layer_config, self.w_shape,
                                   self.b_shape, tau)

    # TODO: Update PFP (currently does not work)
    def create_pfp(self, x_m, x_v, mod_layer_config, init, init_cell=None):
        if init:
            cell_shape = (tf.shape(x_m)[0], self.b_shape[1])
            self.weights.tensor_dict['cs_m'] = tf.zeros(cell_shape)
            self.weights.tensor_dict['cs_v'] = tf.zeros(cell_shape)
            self.weights.tensor_dict['co_m'] = tf.zeros(cell_shape)
            self.weights.tensor_dict['co_v'] = tf.zeros(cell_shape)

        if self.train_config['batchnorm']:
            raise Exception(
                'Batchnorm not implemented for probabilistic forward pass')

        # Vector concatenation (input with recurrent)
        m = tf.concat([x_m, self.weights.tensor_dict['co_m']], axis=1)
        v = tf.concat([x_v, self.weights.tensor_dict['co_v']], axis=1)

        a_i_m, a_i_v = approx_activation(self.weights.var_dict['wi_m'],
                                         self.weights.var_dict['wi_v'],
                                         self.weights.var_dict['bi_m'],
                                         self.weights.var_dict['bi_v'], m, v)
        i_m, i_v = transform_sig_activation(a_i_m, a_i_v)
        a_c_m, a_c_v = approx_activation(self.weights.var_dict['wc_m'],
                                         self.weights.var_dict['wc_v'],
                                         self.weights.var_dict['bc_m'],
                                         self.weights.var_dict['bc_v'], m, v)
        c_m, c_v = transform_tanh_activation(a_c_m, a_c_v)
        a_o_m, a_o_v = approx_activation(self.weights.var_dict['wo_m'],
                                         self.weights.var_dict['wo_v'],
                                         self.weights.var_dict['bo_m'],
                                         self.weights.var_dict['bo_v'], m, v)
        o_m, o_v = transform_sig_activation(a_o_m, a_o_v)

        f_m = 1 - i_m
        f_v = i_v
        f_2nd_mom = tf.square(f_m) + f_v
        i_2nd_mom = tf.square(i_m) + i_v
        self.weights.tensor_dict['cs_v'] = \
            tf.multiply(self.weights.tensor_dict['cs_v'], f_2nd_mom) + \
            tf.multiply(c_v, i_2nd_mom) + \
            tf.multiply(tf.square(self.weights.tensor_dict['cs_m']), f_v) + \
            tf.multiply(tf.square(c_m), i_v)
        self.weights.tensor_dict['cs_m'] = tf.multiply(
            f_m, self.weights.tensor_dict['cs_m']) + tf.multiply(i_m, c_m)

        c_tan_m, c_tan_v = transform_tanh_activation(
            self.weights.tensor_dict['cs_m'], self.weights.tensor_dict['cs_v'])
        o_2nd_mom = tf.square(o_m) + o_v
        self.weights.tensor_dict['co_m'] = tf.multiply(c_tan_m, o_m)
        self.weights.tensor_dict['co_v'] = tf.multiply(
            c_tan_v, o_2nd_mom) + tf.multiply(tf.square(c_tan_m), o_v)

        return (self.weights.tensor_dict['co_m'],
                self.weights.tensor_dict['co_v'])

    # Local reparametrization trick
    def create_l_sampling_pass(self,
                               x,
                               mod_layer_config,
                               time_idx,
                               init,
                               init_cell=None):
        if init:
            cell_shape = (tf.shape(x)[0], self.b_shape[1])
            if init_cell is not None:
                self.weights.tensor_dict['cs'] = init_cell
            else:
                self.weights.tensor_dict['cs'] = tf.zeros(cell_shape)
            self.weights.tensor_dict['co'] = tf.zeros(cell_shape)

        co = self.weights.tensor_dict['co']
        if self.train_config['batchnorm']['type'] == 'batch':
            bn_idx = min(time_idx, self.train_config['batchnorm']['tau'] - 1)
            if 'x' in self.train_config['batchnorm']['modes']:
                if len(self.bn_b_x) == bn_idx:
                    self.bn_b_x.append(get_batchnormalizer())
                x = self.bn_b_x[bn_idx](x, self.is_training)
            if 'h' in self.train_config['batchnorm']['modes'] and bn_idx > 0:
                if len(self.bn_b_h) == bn_idx - 1:
                    self.bn_b_h.append(get_batchnormalizer())
                co = self.bn_b_h[bn_idx - 1](co, self.is_training)

        x = tf.concat([x, co], axis=1)

        if 'i' in self.rnn_config['act_disc']:
            i = self.weights.sample_activation(
                'wi', 'bi', x, 'sig', init,
                self.train_config['batchnorm']['type'] == 'layer')
        else:
            a_i = self.weights.sample_activation(
                'wi', 'bi', x, None, init,
                self.train_config['batchnorm']['type'] == 'layer')
            i = tf.sigmoid(a_i)
        f = 1. - i

        if 'c' in self.rnn_config['act_disc']:
            c = self.weights.sample_activation(
                'wc', 'bc', x, 'tanh', init,
                self.train_config['batchnorm']['type'] == 'layer')
        else:
            a_c = self.weights.sample_activation(
                'wc', 'bc', x, None, init,
                self.train_config['batchnorm']['type'] == 'layer')
            c = tf.tanh(a_c)

        if 'o' in self.rnn_config['act_disc']:
            o = self.weights.sample_activation(
                'wo', 'bo', x, 'sig', init,
                self.train_config['batchnorm']['type'] == 'layer')
        else:
            a_o = self.weights.sample_activation(
                'wo', 'bo', x, None, init,
                self.train_config['batchnorm']['type'] == 'layer')
            o = tf.sigmoid(a_o)

        self.weights.tensor_dict['cs'] = tf.multiply(
            f, self.weights.tensor_dict['cs']) + tf.multiply(i, c)
        if 'i' in self.rnn_config['act_disc']:
            self.weights.tensor_dict['co'] = tf.multiply(
                self.weights.tensor_dict['cs'], o)
        else:
            self.weights.tensor_dict['co'] = tf.multiply(
                tf.tanh(self.weights.tensor_dict['cs']), o)

        return self.weights.tensor_dict['co'], self.weights.tensor_dict['cs']

    # Global reparametrization trick
    def create_g_sampling_pass(self,
                               x,
                               mod_layer_config,
                               time_idx,
                               init,
                               second_arm_pass=False,
                               data_key=None,
                               init_cell=None):
        # if len(self.rnn_config['act_disc']) != 0:
        #     raise Exception('classic reparametrization does not work with discrete activations')

        if init:
            self.weights.create_tensor_samples(second_arm_pass=second_arm_pass,
                                               data_key=data_key)
            cell_shape = (tf.shape(x)[0], self.b_shape[1])
            if init_cell is not None:
                self.weights.tensor_dict['cs'] = init_cell
            else:
                self.weights.tensor_dict['cs'] = tf.zeros(cell_shape)
            self.weights.tensor_dict['co'] = tf.zeros(cell_shape)

        co = self.weights.tensor_dict['co']
        if self.train_config['batchnorm']['type'] == 'batch':
            bn_idx = min(time_idx, self.train_config['batchnorm']['tau'] - 1)
            if 'x' in self.train_config['batchnorm']['modes']:
                if len(self.bn_b_x) == bn_idx:
                    self.bn_b_x.append(get_batchnormalizer())
                x = self.bn_b_x[bn_idx](x, self.is_training)
            if 'h' in self.train_config['batchnorm']['modes'] and bn_idx > 0:
                if len(self.bn_b_h) == bn_idx - 1:
                    self.bn_b_h.append(get_batchnormalizer())
                co = self.bn_b_h[bn_idx - 1](co, self.is_training)

        x = tf.concat([x, co], axis=1)

        i_act = tf.matmul(
            x, self.weights.tensor_dict['wi']) + self.weights.tensor_dict['bi']
        c_act = tf.matmul(
            x, self.weights.tensor_dict['wc']) + self.weights.tensor_dict['bc']
        o_act = tf.matmul(
            x, self.weights.tensor_dict['wo']) + self.weights.tensor_dict['bo']

        if self.train_config['batchnorm']['type'] == 'layer':
            i_act = tf.contrib.layers.layer_norm(i_act)
            c_act = tf.contrib.layers.layer_norm(c_act)
            o_act = tf.contrib.layers.layer_norm(o_act)

        if 'i' in self.rnn_config['act_disc']:
            print(self.rnn_config['act_bins'])
            i = disc_sigmoid(i_act, self.rnn_config['act_bins'])
        else:
            i = tf.sigmoid(i_act)

        f = 1. - i
        if 'c' in self.rnn_config['act_disc']:
            c = disc_tanh(c_act, self.rnn_config['act_bins'])
        else:
            c = tf.tanh(c_act)

        if 'o' in self.rnn_config['act_disc']:
            o = disc_sigmoid(o_act, self.rnn_config['act_bins'])
        else:
            o = tf.sigmoid(o_act)

        self.weights.tensor_dict['cs'] = tf.multiply(
            f, self.weights.tensor_dict['cs']) + tf.multiply(i, c)
        self.weights.tensor_dict['co'] = tf.multiply(
            o, tf.tanh(self.weights.tensor_dict['cs']))
        return self.weights.tensor_dict['co'], self.weights.tensor_dict['cs']

    def create_var_fp(self, x, time_idx, init, init_cell=None):
        if init:
            cell_shape = (tf.shape(x)[0], self.b_shape[1])
            if init_cell is not None:
                self.weights.tensor_dict['cs'] = init_cell
            else:
                self.weights.tensor_dict['cs'] = tf.zeros(cell_shape)
            self.weights.tensor_dict['co'] = tf.zeros(cell_shape)

        co = self.weights.tensor_dict['co']
        if self.train_config['batchnorm']['type'] == 'batch':
            bn_idx = min(time_idx, self.train_config['batchnorm']['tau'] - 1)
            if 'x' in self.train_config['batchnorm']['modes']:
                if len(self.bn_s_x) == bn_idx:
                    self.bn_s_x.append(get_batchnormalizer())
                x = self.bn_s_x[bn_idx](x, self.is_training)
            if 'h' in self.train_config['batchnorm']['modes'] and bn_idx > 0:
                if len(self.bn_s_h) == bn_idx - 1:
                    self.bn_s_h.append(get_batchnormalizer())
                co = self.bn_s_h[bn_idx - 1](co, self.is_training)

        x = tf.concat([x, co], axis=1)
        i_act = tf.matmul(
            x, self.weights.var_dict['wi']) + self.weights.var_dict['bi']
        c_act = tf.matmul(
            x, self.weights.var_dict['wc']) + self.weights.var_dict['bc']
        o_act = tf.matmul(
            x, self.weights.var_dict['wo']) + self.weights.var_dict['bo']

        if self.train_config['batchnorm']['type'] == 'layer':
            i_act = tf.contrib.layers.layer_norm(i_act)
            c_act = tf.contrib.layers.layer_norm(c_act)
            o_act = tf.contrib.layers.layer_norm(o_act)

        if init:
            for act_type, act in zip(['i', 'c', 'o'], [i_act, c_act, o_act]):
                self.acts[act_type] = act
                for neuron_idc in range(len(self.act_neurons)):
                    # slice out the randomly chosen neuron, not the loop index
                    self.acts[act_type + '_' + str(neuron_idc)] = tf.slice(
                        act,
                        begin=(0, self.act_neurons[neuron_idc]),
                        size=(-1, 1))

        else:
            for act_type, act in zip(['i', 'c', 'o'], [i_act, c_act, o_act]):
                self.acts[act_type] = tf.concat([act, self.acts[act_type]],
                                                axis=0)
                for neuron_idc in range(len(self.act_neurons)):
                    self.acts[act_type + '_' + str(neuron_idc)] = \
                        tf.concat([tf.slice(act, begin=(0, self.act_neurons[neuron_idc]), size=(-1, 1)),
                                   self.acts[act_type + '_' + str(neuron_idc)]], axis=0)

        if 'i' in self.rnn_config['act_disc']:
            i = tf.cast(tf.cast(
                tf.sigmoid(i_act) * self.rnn_config['act_bins'],
                dtype=tf.int32),
                        dtype=tf.float32) / self.rnn_config['act_bins']
            if get_info_config()['cell_access']:
                self.cell_access_mat.append(i)

        else:
            i = tf.sigmoid(i_act)
        f = 1. - i

        if 'c' in self.rnn_config['act_disc']:
            c = tf.cast(tf.cast(
                tf.sigmoid(c_act) * self.rnn_config['act_bins'],
                dtype=tf.int32),
                        dtype=tf.float32) * 2 / self.rnn_config['act_bins'] - 1
        else:
            c = tf.tanh(c_act)

        if 'o' in self.rnn_config['act_disc']:
            o = tf.cast(tf.cast(
                tf.sigmoid(o_act) * self.rnn_config['act_bins'],
                dtype=tf.int32),
                        dtype=tf.float32) / self.rnn_config['act_bins']
        else:
            o = tf.sigmoid(o_act)

        self.weights.tensor_dict['cs'] = tf.multiply(
            f, self.weights.tensor_dict['cs']) + tf.multiply(i, c)
        if 'i' in self.rnn_config['act_disc']:
            self.weights.tensor_dict['co'] = tf.multiply(
                o, self.weights.tensor_dict['cs'])
        else:
            self.weights.tensor_dict['co'] = tf.multiply(
                o, tf.tanh(self.weights.tensor_dict['cs']))
        return self.weights.tensor_dict['co'], self.weights.tensor_dict['cs']
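All three passes share one recurrence with a coupled forget gate, f = 1 - i (a CIFG-style LSTM), instead of separately parametrized forget weights. Stripped of TensorFlow, sampling, and batchnorm, the update is roughly this numpy sketch:

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def cifg_step(x, co, cs, Wi, bi, Wc, bc, Wo, bo):
    z = np.concatenate([x, co], axis=1)  # input joined with recurrent output
    i = sigmoid(z @ Wi + bi)             # input gate
    f = 1.0 - i                          # forget gate tied to the input gate
    c = np.tanh(z @ Wc + bc)             # candidate cell state
    o = sigmoid(z @ Wo + bo)             # output gate
    cs = f * cs + i * c                  # new cell state
    co = o * np.tanh(cs)                 # new cell output
    return co, cs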
Code example #8
File: fc_layer.py Project: NikSQ/VarRNN
class FCLayer:
    def __init__(self, layer_idx, is_training, tau, prev_neurons=None):
        rnn_config = get_rnn_config()
        self.train_config = get_train_config()
        self.layer_config = rnn_config['layer_configs'][layer_idx]
        if prev_neurons is None:
            self.w_shape = (rnn_config['layout'][layer_idx - 1],
                            rnn_config['layout'][layer_idx])
        else:
            self.w_shape = (prev_neurons, rnn_config['layout'][layer_idx])
        self.b_shape = (1, self.w_shape[1])
        self.is_training = is_training

        # Activation summaries and specific neurons to gather individual histograms
        self.acts = dict()
        self.act_neurons = np.random.choice(
            range(self.b_shape[1]),
            size=(get_info_config()['tensorboard']['single_acts'], ),
            replace=False)
        if (self.train_config['batchnorm']['type'] == 'batch'
                and 'fc' in self.train_config['batchnorm']['modes']):
            self.bn_s_x = get_batchnormalizer()
            self.bn_b_x = get_batchnormalizer()

        with tf.variable_scope(self.layer_config['var_scope']):
            var_keys = ['w', 'b']
            self.weights = Weights(var_keys, self.layer_config, self.w_shape,
                                   self.b_shape, tau)

    def create_pfp(self, x_m, x_v, mod_layer_config, init):
        a_m, a_v = approx_activation(self.weights.var_dict['w_m'],
                                     self.weights.var_dict['w_v'],
                                     self.weights.var_dict['b_m'],
                                     self.weights.var_dict['b_v'], x_m, x_v)
        return a_m, a_v

    def create_l_sampling_pass(self,
                               x,
                               mod_layer_config,
                               time_idx,
                               init,
                               init_cell=None):
        if (self.train_config['batchnorm']['type'] == 'batch'
                and 'fc' in self.train_config['batchnorm']['modes']):
            x = self.bn_b_x(x, self.is_training)
        return self.weights.sample_activation(
            'w', 'b', x, None, init,
            self.train_config['batchnorm']['type'] == 'layer'), None

    def create_g_sampling_pass(self,
                               x,
                               mod_layer_config,
                               time_idx,
                               init,
                               second_arm_pass=False,
                               data_key=None,
                               init_cell=None):
        if init:
            self.weights.create_tensor_samples(second_arm_pass=second_arm_pass,
                                               data_key=data_key)
        if (self.train_config['batchnorm']['type'] == 'batch'
                and 'fc' in self.train_config['batchnorm']['modes']):
            x = self.bn_b_x(x, self.is_training)

        act = tf.matmul(x, self.weights.tensor_dict['w'])
        if self.layer_config['bias_enabled']:
            act += self.weights.tensor_dict['b']
        if self.train_config['batchnorm']['type'] == 'layer':
            return tf.contrib.layers.layer_norm(act), None
        else:
            return act, None

    def create_var_fp(self, x, time_idx, init, init_cell=None):
        if (self.train_config['batchnorm']['type'] == 'batch'
                and 'fc' in self.train_config['batchnorm']['modes']):
            x = self.bn_s_x(x, self.is_training)

        act = tf.matmul(x, self.weights.var_dict['w'])
        if self.layer_config['bias_enabled']:
            act += self.weights.var_dict['b']
        if self.train_config['batchnorm']['type'] == 'layer':
            act = tf.contrib.layers.layer_norm(act)

        # Store activations over layer and over single neurons.
        if get_rnn_config()['architecture'] == 'encoder':
            return act, None
        if time_idx == 0:
            self.acts['n'] = act
            for neuron_idc in range(len(self.act_neurons)):
                # slice out the randomly chosen neuron, not the loop index
                self.acts['n' + '_' + str(neuron_idc)] = tf.slice(
                    act,
                    begin=(0, self.act_neurons[neuron_idc]),
                    size=(-1, 1))
        else:
            self.acts['n'] = tf.concat([self.acts['n'], act], 0)
            for neuron_idc in range(len(self.act_neurons)):
                self.acts['n' + '_' + str(neuron_idc)] = tf.concat([
                    tf.slice(act,
                             begin=(0, self.act_neurons[neuron_idc]),
                             size=(-1, 1)),
                    self.acts['n_' + str(neuron_idc)]
                ], axis=0)

        return act, None
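The three forward passes of FCLayer differ mainly in where the Gaussian sampling happens: create_var_fp uses the variational means as point weights, create_g_sampling_pass samples a weight tensor per pass (the global reparametrization trick), and create_l_sampling_pass samples the pre-activation directly (the local trick, delegated to Weights.sample_activation). A plain numpy sketch of the two tricks for one dense layer with factorized Gaussian weights (all names hypothetical):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((8, 5))            # batch of 8, 5 features
w_m = rng.standard_normal((5, 3))          # weight means
w_v = np.abs(rng.standard_normal((5, 3)))  # weight variances

# global trick: sample the weights once, then a deterministic matmul
w = w_m + np.sqrt(w_v) * rng.standard_normal(w_m.shape)
a_global = x @ w

# local trick: sample the pre-activation from its induced Gaussian,
# a ~ N(x @ w_m, x^2 @ w_v), which lowers gradient variance per sample
a_mean, a_var = x @ w_m, np.square(x) @ w_v
a_local = a_mean + np.sqrt(a_var) * rng.standard_normal(a_mean.shape)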