def build(self, x):
        """Run the backprop version of the CCircuit."""
        self.prepare_tensors()
        i0 = tf.constant(0)
        if self.hidden_init == 'identity':
            h1 = tf.identity(x)
            h2 = tf.identity(x)
        elif self.hidden_init == 'random':
            h1 = initialization.xavier_initializer(
                shape=[self.n, self.h, self.w, self.k],
                uniform=self.normal_initializer,
                mask=None)
            h2 = initialization.xavier_initializer(
                shape=[self.n, self.h, self.w, self.k],
                uniform=self.normal_initializer,
                mask=None)
        elif self.hidden_init == 'zeros':
            h1 = tf.zeros_like(x)
            h2 = tf.zeros_like(x)
        else:
            raise RuntimeError('Unknown hidden_init: %s' % self.hidden_init)

        # While loop
        elems = [i0, x, h1, h2]
        returned = tf.while_loop(self.condition,
                                 self.full,
                                 loop_vars=elems,
                                 back_prop=True,
                                 swap_memory=True)

        # Prepare output
        i0, x, h1, h2 = returned
        return h2
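
For reference: tf.while_loop requires that condition and full accept exactly the tensors passed in loop_vars and that the body return tensors with matching dtypes and shapes. A minimal sketch of those companion methods, assuming a step counter and two hidden states; the update rule is a hypothetical placeholder, not the real circuit dynamics:

    def condition(self, i0, x, h1, h2):
        """Continue while fewer than self.timesteps iterations have run."""
        return i0 < self.timesteps

    def full(self, i0, x, h1, h2):
        """One recurrent step; must preserve loop_var dtypes and shapes."""
        h1 = tf.nn.relu(x - h2)   # hypothetical suppression stage
        h2 = tf.nn.relu(h1 + h2)  # hypothetical facilitation stage
        return i0 + 1, x, h1, h2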
Example 2
    def build(self, x):
        """Run the backprop version of the CCircuit."""
        self.prepare_tensors()
        x_shape = x.get_shape().as_list()
        i0 = 0  # tf.constant(0)
        if self.hidden_init == 'identity':
            h2 = tf.identity(x)
        elif self.hidden_init == 'random':
            h2 = initialization.xavier_initializer(
                shape=x_shape, uniform=self.normal_initializer, mask=None)
        elif self.hidden_init == 'zeros':
            h2 = tf.zeros_like(x)
        elif self.hidden_init == 'learned':
            h2 = tf.get_variable(name='%s_h2_state' % self.layer_name,
                                 dtype=self.dtype,
                                 initializer=initialization.xavier_initializer(
                                     shape=x_shape,
                                     uniform=self.normal_initializer),
                                 trainable=self.train)
        else:
            raise RuntimeError('Unknown hidden_init: %s' % self.hidden_init)

        # Loop
        for idx in range(self.timesteps):
            i0, x, h2 = self.full(i0=i0, x=x, h2=h2)
        return h2
Example 3
    def build(self, x):
        """Run the backprop version of the CCircuit."""
        self.prepare_tensors()
        x_shape = x.get_shape().as_list()
        if self.hidden_init == 'identity':
            h1 = tf.identity(x)
            h2 = tf.identity(x)
        elif self.hidden_init == 'random':
            h1 = initialization.xavier_initializer(
                shape=x_shape, uniform=self.normal_initializer, mask=None)
            h2 = initialization.xavier_initializer(
                shape=x_shape, uniform=self.normal_initializer, mask=None)
        elif self.hidden_init == 'zeros':
            h1 = tf.zeros_like(x)
            h2 = tf.zeros_like(x)
        else:
            raise RuntimeError('Unknown hidden_init: %s' % self.hidden_init)

        if not self.while_loop:
            for idx in range(self.timesteps):
                _, x, h1, h2 = self.full(i0=idx, x=x, h1=h1, h2=h2)
        else:
            # While loop
            i0 = tf.constant(0)
            elems = [i0, x, h1, h2]
            returned = tf.while_loop(self.condition,
                                     self.full,
                                     loop_vars=elems,
                                     back_prop=True,
                                     swap_memory=False)

            # Prepare output
            i0, x, h1, h2 = returned
        return h2
Example 4
    def build(self, x):
        """Run the backprop version of the CCircuit."""
        self.prepare_tensors()
        i0 = tf.constant(0)
        if self.hidden_init == 'identity':
            h1 = tf.identity(x)
            h2 = tf.identity(x)
        elif self.hidden_init == 'random':
            h1 = initialization.xavier_initializer(
                shape=[self.n, self.h, self.w, self.k],
                uniform=self.normal_initializer,
                mask=None)
            h2 = initialization.xavier_initializer(
                shape=[self.n, self.h, self.w, self.k],
                uniform=self.normal_initializer,
                mask=None)
        elif self.hidden_init == 'zeros':
            h1 = tf.zeros_like(x)
            h2 = tf.zeros_like(x)
        else:
            raise RuntimeError('Unknown hidden_init: %s' % self.hidden_init)

        # While loop
        h1_list, h2_list = [], []
        for idx in range(self.timesteps):
            i0, x, h1, h2 = self.full(i0, x, h1, h2)
            h1_list += [h1]
            h2_list += [h2]
        return h2, h2_list, h1_list
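
Because this variant returns the per-timestep activities, a loss can be attached at every step. A hedged sketch of deep supervision over the unrolled states, where model and target are hypothetical stand-ins:

# Hypothetical deep supervision over all timesteps.
h2_final, h2_list, h1_list = model.build(x)
step_losses = [tf.reduce_mean(tf.square(h - target)) for h in h2_list]
loss = tf.add_n(step_losses) / float(len(step_losses))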
Example 5
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices.
        (np.prod([h, w, k]) / 2) - k params in the surround filter
        """
        with tf.variable_scope('%s_hgru_weights' % self.layer_name):
            self.horizontal_kernels = tf.get_variable(
                name='%s_horizontal' % self.layer_name,
                dtype=self.dtype,
                initializer=initialization.xavier_initializer(
                    shape=self.h_shape,
                    uniform=self.normal_initializer),
                trainable=True)
            self.h_bias = tf.get_variable(
                name='%s_h_bias' % self.layer_name,
                initializer=initialization.xavier_initializer(
                    shape=self.bias_shape,
                    uniform=self.normal_initializer,
                    mask=None))
            self.gain_kernels = tf.get_variable(
                name='%s_gain' % self.layer_name,
                dtype=self.dtype,
                trainable=True,
                initializer=initialization.xavier_initializer(
                    shape=self.g_shape,
                    uniform=self.normal_initializer,
                    mask=None))
            self.mix_kernels = tf.get_variable(
                name='%s_mix' % self.layer_name,
                dtype=self.dtype,
                trainable=True,
                initializer=initialization.xavier_initializer(
                    shape=self.m_shape,
                    uniform=self.normal_initializer,
                    mask=None))

            # Gain bias
            if self.gate_bias_init == 'chronos':
                bias_init = -tf.log(
                    tf.random_uniform(
                        self.bias_shape, minval=1, maxval=self.timesteps - 1))
            else:
                bias_init = tf.ones(self.bias_shape)
            self.gain_bias = tf.get_variable(
                name='%s_gain_bias' % self.layer_name,
                dtype=self.dtype,
                trainable=True,
                initializer=bias_init)
            if self.gate_bias_init == 'chronos':
                bias_init = -bias_init
            else:
                bias_init = tf.ones(self.bias_shape)
            self.mix_bias = tf.get_variable(
                name='%s_mix_bias' % self.layer_name,
                dtype=self.dtype,
                trainable=True,
                initializer=bias_init)
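
The 'chronos' branch above appears to follow the chronos initialization of Tallec & Ollivier (2018): gate biases are drawn as +/- log U(1, timesteps - 1), so after the sigmoid the paired gates start out spread across effective time constants between 1 and timesteps - 1, with the mix bias set to the negation of the gain bias. A standalone sketch of the same idea, under that assumption and with hypothetical sizes:

import tensorflow as tf

# Assumed chronos-style gate-bias init (Tallec & Ollivier, 2018).
bias_shape, timesteps = [1, 1, 1, 32], 8  # hypothetical sizes
u = tf.random_uniform(bias_shape, minval=1, maxval=timesteps - 1)
gain_bias_init = -tf.log(u)  # sigmoid(-log u) = 1 / (1 + u): starts mostly closed
mix_bias_init = tf.log(u)    # sigmoid(log u) = u / (1 + u): starts mostly open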
Example 6
    def build(self, reduce_memory=False):
        """Run the backprop version of the CCircuit."""
        self.prepare_tensors()
        i0 = tf.constant(0)
        if self.hidden_init == 'identity':
            I = tf.identity(self.X)
            O = tf.identity(self.X)
        elif self.hidden_init == 'random':
            I = initialization.xavier_initializer(
                shape=[self.n, self.h, self.w, self.k],
                uniform=self.normal_initializer,
                mask=None)
            O = initialization.xavier_initializer(
                shape=[self.n, self.h, self.w, self.k],
                uniform=self.normal_initializer,
                mask=None)
        elif self.hidden_init == 'zeros':
            I = tf.zeros_like(self.X)
            O = tf.zeros_like(self.X)
        else:
            raise RuntimeError('Unknown hidden_init: %s' % self.hidden_init)

        if reduce_memory:
            print('Warning: Using FF version of the model.')
            for t in range(self.timesteps):
                i0, O, I = self.full(i0, O, I)
        else:
            # While loop
            elems = [i0, O, I]

            returned = tf.while_loop(self.condition,
                                     self.full,
                                     loop_vars=elems,
                                     back_prop=True,
                                     swap_memory=False)

            # Prepare output
            i0, O, I = returned  # i0, O, I

        if self.return_weights:
            weights = self.gather_tensors(wak='weight')
            tuning = self.gather_tensors(wak='tuning')
            new_tuning = {}
            for k, v in tuning.items():
                key_name = v.name.split('/')[-1].split(':')[0]
                new_tuning[key_name] = v
            weights = dict(weights, **new_tuning)
            activities = self.gather_tensors(wak='activity')
            # Attach weights if using association field
            if self.association_field:
                weights['p_t'] = self.p_r  # Make available for regularization
            if self.full_far_eCRF:
                weights['t_t'] = self.t_r  # Make available for regularization
            return O, weights, activities
        else:
            return O
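
When return_weights is set, the weights dictionary exposes kernels such as the association-field p_t to an external regularizer. A hedged sketch of consuming that return, where model and task_loss are hypothetical:

# Hypothetical consumption of the (O, weights, activities) return.
O, weights, activities = model.build(reduce_memory=False)
weight_decay = tf.add_n([tf.nn.l2_loss(v) for v in weights.values()])
total_loss = task_loss + 1e-4 * weight_decay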
Example 7
def sparse_pool_layer(self,
                      bottom,
                      out_channels,
                      name,
                      in_channels=None,
                      aux=None):
    """Sparse pooling layer."""
    def create_gaussian_rf(xy, h, w, k):
        """Create a gaussian bump for initializing the spatial weights."""
        # TODO: implement this; the call below also passes the RF count k.
        raise NotImplementedError

    with tf.variable_scope(name):
        bottom_shape = [int(x) for x in bottom.get_shape()]
        if in_channels is None:
            in_channels = bottom_shape[-1]

        # K channel weights
        channel_weights = tf.get_variable(
            name='%s_channel' % name,
            dtype=tf.float32,
            initializer=initialization.xavier_initializer(
                shape=[in_channels, out_channels], uniform=True, mask=None))

        # HxW spatial weights
        spatial_weights = tf.get_variable(
            name='%s_spatial' % name,
            dtype=tf.float32,
            initializer=initialization.xavier_initializer(
                shape=[1, bottom_shape[1], bottom_shape[2], 1], mask=None))

        # If supplied, initialize the spatial weights with RF info
        if aux is not None and 'xy' in aux.keys():
            gaussian_xy = aux['xy']
            if 'h' in aux.keys():
                gaussian_h = aux['h']
                gaussian_w = aux['w']
                k = aux['k']
            else:
                gaussian_h, gaussian_w, k = None, None, None
            spatial_rf = create_gaussian_rf(xy=gaussian_xy,
                                            h=gaussian_h,
                                            w=gaussian_w,
                                            k=k)
            spatial_weights += spatial_rf
        spatial_sparse = tf.reduce_mean(bottom * spatial_weights,
                                        axis=[1, 2])
        output = tf.matmul(spatial_sparse, channel_weights)
        return self, output
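
sparse_pool_layer factors pooling into a learned spatial average followed by a channel mixture: bottom * spatial_weights is reduced over height and width to [N, in_channels], and the matmul with channel_weights then maps to [N, out_channels]. A hedged usage sketch with hypothetical shapes, assuming the surrounding initialization module is importable; the self argument is only echoed back, so None suffices:

# Hypothetical usage: pool a [8, 32, 32, 64] feature map down to [8, 16].
features = tf.placeholder(tf.float32, [8, 32, 32, 64])
_, pooled = sparse_pool_layer(
    None, bottom=features, out_channels=16, name='sparse_pool')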
    def build(self, reduce_memory=False):
        """Run the backprop version of the CCircuit."""
        self.prepare_tensors()
        i0 = tf.constant(0)
        I = initialization.xavier_initializer(
            shape=[self.n, self.h, self.w, self.k],
            uniform=self.normal_initializer,
            mask=None)
        O = initialization.xavier_initializer(
            shape=[self.n, self.h, self.w, self.k],
            uniform=self.normal_initializer,
            mask=None)

        if reduce_memory:
            print('Warning: Using FF version of the model.')
            for t in range(self.timesteps):
                i0, O, I = self[self.model_version](i0, O, I)
        else:
            # While loop
            elems = [i0, O, I]

            returned = tf.while_loop(self.condition,
                                     self[self.model_version],
                                     loop_vars=elems,
                                     back_prop=True,
                                     swap_memory=False)
            # Prepare output
            i0, O, I = returned  # i0, O, I
        if self.return_weights:
            weights = self.gather_tensors(wak='weight')
            tuning = self.gather_tensors(wak='tuning')
            new_tuning = {}
            for k, v in tuning.items():
                key_name = v.name.split('/')[-1].split(':')[0]
                new_tuning[key_name] = v
            weights = dict(weights, **new_tuning)
            activities = self.gather_tensors(wak='activity')
            return O, weights, activities
        else:
            return O
Example 9
    def build(self, x):
        """Run the backprop version of the CCircuit."""
        self.prepare_tensors()
        x_shape = x.get_shape().as_list()
        i0 = 0  # tf.constant(0)
        if self.hidden_init == 'identity':
            h1 = tf.identity(x)
            h2 = tf.identity(x)
        elif self.hidden_init == 'random':
            h1 = initialization.xavier_initializer(
                shape=x_shape, uniform=self.normal_initializer, mask=None)
            h2 = initialization.xavier_initializer(
                shape=x_shape, uniform=self.normal_initializer, mask=None)
        elif self.hidden_init == 'zeros':
            h1 = tf.zeros_like(x)
            h2 = tf.zeros_like(x)
        else:
            raise RuntimeError('Unknown hidden_init: %s' % self.hidden_init)

        # Loop
        for idx in range(self.timesteps):
            i0, x, h1, h2 = self.full(i0=i0, x=x, h1=h1, h2=h2)
        return h2
Example 10
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices."""
        self.weight_dict = {  # Weights lower/activity upper
            'U': {
                'r': {
                    'weight': 'u_r',
                    'activity': 'U_r'
                },
                'f': {
                    'weight': 'u_f',
                    'bias': 'ub_f',
                    'activity': 'U_f'
                }
            },
            'T': {
                'r': {
                    'weight': 't_r',
                    'activity': 'T_r'
                },
                'f': {
                    'weight': 't_f',
                    'bias': 'tb_f',
                    'activity': 'T_f'
                }
            },
            'P': {
                'r': {
                    'weight': 'p_r',
                    'activity': 'P_r'
                },
                'f': {
                    'weight': 'p_f',
                    'bias': 'pb_f',
                    'activity': 'P_f'
                }
            },
            'Q': {
                'r': {
                    'weight': 'q_r',
                    'activity': 'Q_r'
                },
                'f': {
                    'weight': 'q_f',
                    'bias': 'qb_f',
                    'activity': 'Q_f'
                }
            },
            'I': {
                'r': {  # Recurrent state
                    'weight': 'i_r',
                    'activity': 'I_r'
                }
            },
            'O': {
                'r': {  # Recurrent state
                    'weight': 'o_r',
                    'activity': 'O_r'
                }
            }
        }
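        # Each weight below is registered via setattr under the lowercase
        # name from this dict, so the tuned-summation kernel, for example,
        # is reachable either as
        # getattr(self, self.weight_dict['Q']['r']['weight'])
        # or directly as self.q_r.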

        # tuned summation: pooling in h, w dimensions
        #############################################
        setattr(
            self, self.weight_dict['Q']['r']['weight'],
            tf.get_variable(name=self.weight_dict['Q']['r']['weight'],
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.q_shape,
                                uniform=self.normal_initializer,
                                mask=None)))

        # untuned suppression: reduction across feature axis
        ####################################################
        setattr(
            self, self.weight_dict['U']['r']['weight'],
            tf.get_variable(name=self.weight_dict['U']['r']['weight'],
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.u_shape,
                                uniform=self.normal_initializer,
                                mask=None)))

        # tuned summation: pooling in h, w dimensions
        #############################################
        p_array = np.zeros(self.p_shape)
        for pdx in range(self.k):
            p_array[:self.SSN, :self.SSN, pdx, pdx] = 1.0
        p_array[self.SSN // 2 - py_utils.ifloor(self.SRF / 2.0):self.SSN // 2 +
                py_utils.iceil(self.SRF / 2.0),
                self.SSN // 2 - py_utils.ifloor(self.SRF / 2.0):self.SSN // 2 +
                py_utils.iceil(self.SRF / 2.0), :,  # exclude CRF!
                :] = 0.0

        setattr(
            self, self.weight_dict['P']['r']['weight'],
            tf.get_variable(name=self.weight_dict['P']['r']['weight'],
                            dtype=self.dtype,
                            initializer=p_array.astype(np.float32),
                            trainable=False))

        # tuned suppression: pooling in h, w dimensions
        ###############################################
        t_array = np.zeros(self.t_shape)
        for tdx in range(self.k):
            t_array[:self.SSF, :self.SSF, tdx, tdx] = 1.0
        t_array[self.SSF // 2 - py_utils.ifloor(self.SSN / 2.0):self.SSF // 2 +
                py_utils.iceil(self.SSN / 2.0),
                self.SSF // 2 - py_utils.ifloor(self.SSN / 2.0):self.SSF // 2 +
                py_utils.iceil(self.SSN / 2.0), :,  # exclude near surround!
                :] = 0.0
        setattr(
            self, self.weight_dict['T']['r']['weight'],
            tf.get_variable(name=self.weight_dict['T']['r']['weight'],
                            dtype=self.dtype,
                            initializer=t_array.astype(np.float32),
                            trainable=False))

        # Scalar weights
        self.xi = tf.get_variable(name='xi', initializer=1.)
        self.alpha = tf.get_variable(name='alpha', initializer=1.)
        self.beta = tf.get_variable(name='beta', initializer=1.)
        self.mu = tf.get_variable(name='mu', initializer=1.)
        self.nu = tf.get_variable(name='nu', initializer=1.)
        self.zeta = tf.get_variable(name='zeta', initializer=1.)
        self.gamma = tf.get_variable(name='gamma', initializer=1.)
        self.delta = tf.get_variable(name='delta', initializer=1.)
        self.eps = tf.get_variable(name='eps', initializer=1.)
        self.eta = tf.get_variable(name='eta', initializer=1.)
        self.sig = tf.get_variable(name='sig', initializer=1.)
        self.tau = tf.get_variable(name='tau', initializer=1.)
    def build(self, x):
        """Run the backprop version of the CCircuit."""
        self.prepare_tensors()
        full_x_shape = x.get_shape().as_list()
        i0 = tf.constant(0)

        # Remove time dimension
        x_shape = [full_x_shape[0]] + full_x_shape[2:]

        # Set up hiddens
        if self.hidden_init == 'identity':
            h1 = tf.identity(x[:, 0])
            h2 = tf.identity(x[:, 0])
        elif self.hidden_init == 'random':
            h1 = initialization.xavier_initializer(
                shape=x_shape, uniform=self.normal_initializer, mask=None)
            h2 = initialization.xavier_initializer(
                shape=x_shape, uniform=self.normal_initializer, mask=None)
        elif self.hidden_init == 'zeros':
            h1 = tf.zeros(x_shape)
            h2 = tf.zeros(x_shape)
        else:
            raise RuntimeError('Unknown hidden_init: %s' % self.hidden_init)

        # While loop
        if self.while_loop:
            # Get a dynamic h2 to store all h2s
            dh2 = tf.TensorArray(dtype=tf.float32,
                                 size=self.timesteps,
                                 dynamic_size=False,
                                 clear_after_read=True,
                                 name='h2_list')

            elems = [i0, x, h1, h2, dh2]
            returned = tf.while_loop(self.condition,
                                     self.full,
                                     loop_vars=elems,
                                     back_prop=True,
                                     swap_memory=False)
            i0, x, h1, h2, dh2 = returned
            dh2 = dh2.stack()
            dh2 = tf.transpose(dh2, (1, 0, 2, 3, 4))
            dh2.set_shape(full_x_shape)
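            # Shape bookkeeping: TensorArray.stack() is time-major,
            # [timesteps, N, H, W, K]; the transpose restores the
            # batch-major [N, timesteps, H, W, K] layout of the input,
            # and set_shape pins the static shape for downstream ops.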
        else:
            dh2 = []
            timesteps = self.timesteps + self.warmup
            count, fake_count = 0, 0
            for idx in range(timesteps):
                if idx < self.warmup:
                    # Burn in the 0th frame
                    it_x = x[:, 0, :, :, :]
                    i0, _, h1, h2, _ = self.full(i0=i0,
                                                 x=it_x,
                                                 h1=h1,
                                                 h2=h2,
                                                 dh2=None,
                                                 idx=(1 + idx) * 999)
                else:
                    it_x = x[:, count, :, :, :]
                    if self.additional_recurrence:
                        for _ in range(self.additional_recurrence + 1):
                            i0, _, h1, h2, _ = self.full(i0=i0,
                                                         x=it_x,
                                                         h1=h1,
                                                         h2=h2,
                                                         dh2=None,
                                                         idx=fake_count)
                            fake_count += 1
                    else:
                        i0, _, h1, h2, _ = self.full(i0=i0,
                                                     x=it_x,
                                                     h1=h1,
                                                     h2=h2,
                                                     dh2=None,
                                                     idx=count)
                    dh2 += [h2]
                    count += 1
                    fake_count += 1
            dh2 = tf.stack(dh2, axis=1)

        # Prepare output
        return dh2
Example 12
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices."""
        self.weight_dict = {  # Weights lower/activity upper
            'U': {
                'r': {
                    'weight': 'u_r',
                    'activity': 'U_r'
                    },
                'f': {
                    'weight': 'u_f',
                    'bias': 'ub_f',
                    'activity': 'U_f'
                    }
                },
            'T': {
                'r': {
                    'weight': 't_r',
                    'activity': 'T_r'
                    },
                'f': {
                    'weight': 't_f',
                    'bias': 'tb_f',
                    'activity': 'T_f'
                    }
                },
            'P': {
                'r': {
                    'weight': 'p_r',
                    'activity': 'P_r'
                    },
                'f': {
                    'weight': 'p_f',
                    'bias': 'pb_f',
                    'activity': 'P_f'
                    }
                },
            'Q': {
                'r': {
                    'weight': 'q_r',
                    'activity': 'Q_r'
                    },
                'f': {
                    'weight': 'q_f',
                    'bias': 'qb_f',
                    'activity': 'Q_f'
                    }
                },
            'I': {
                'r': {  # Recurrent state
                    'weight': 'i_r',
                    'bias': 'ib_r',
                    'activity': 'I_r'
                }
            },
            'O': {
                'r': {  # Recurrent state
                    'weight': 'o_r',
                    'bias': 'ob_r',
                    'activity': 'O_r'
                }
            }
        }

        # tuned summation: pooling in h, w dimensions
        #############################################
        setattr(
            self,
            self.weight_dict['Q']['r']['weight'],
            tf.get_variable(
                name=self.weight_dict['Q']['r']['weight'],
                dtype=self.dtype,
                initializer=initialization.xavier_initializer(
                    shape=self.q_shape,
                    uniform=self.normal_initializer,
                    mask=None)))

        # untuned suppression: reduction across feature axis
        ####################################################
        setattr(
            self,
            self.weight_dict['U']['r']['weight'],
            tf.get_variable(
                name=self.weight_dict['U']['r']['weight'],
                dtype=self.dtype,
                initializer=initialization.xavier_initializer(
                    shape=self.u_shape,
                    uniform=self.normal_initializer,
                    mask=None)))

        # tuned summation: pooling in h, w dimensions
        #############################################
        p_array = np.zeros(self.p_shape)
        for pdx in range(self.k):
            p_array[:self.SSN, :self.SSN, pdx, pdx] = 1.0
        p_array[
            self.SSN // 2 - py_utils.ifloor(
                self.SRF / 2.0):self.SSN // 2 + py_utils.iceil(
                self.SRF / 2.0),
            self.SSN // 2 - py_utils.ifloor(
                self.SRF / 2.0):self.SSN // 2 + py_utils.iceil(
                self.SRF / 2.0),
            :,  # exclude CRF!
            :] = 0.0

        setattr(
            self,
            self.weight_dict['P']['r']['weight'],
            tf.get_variable(
                name=self.weight_dict['P']['r']['weight'],
                dtype=self.dtype,
                initializer=initialization.xavier_initializer(
                    shape=self.p_shape,
                    uniform=self.normal_initializer,
                    mask=p_array)))

        # tuned suppression: pooling in h, w dimensions
        ###############################################
        t_array = np.zeros(self.t_shape)
        for tdx in range(self.k):
            t_array[:self.SSF, :self.SSF, tdx, tdx] = 1.0
        t_array[
            self.SSF // 2 - py_utils.ifloor(
                self.SSN / 2.0):self.SSF // 2 + py_utils.iceil(
                self.SSN / 2.0),
            self.SSF // 2 - py_utils.ifloor(
                self.SSN / 2.0):self.SSF // 2 + py_utils.iceil(
                self.SSN / 2.0),
            :,  # exclude near surround!
            :] = 0.0
        setattr(
            self,
            self.weight_dict['T']['r']['weight'],
            tf.get_variable(
                name=self.weight_dict['T']['r']['weight'],
                dtype=self.dtype,
                initializer=initialization.xavier_initializer(
                    shape=self.t_shape,
                    uniform=self.normal_initializer,
                    mask=t_array)))

        if self.model_version != 'no_input_facing':
            # Also create input-facing weight matrices
            # Q
            setattr(
                self,
                self.weight_dict['Q']['f']['weight'],
                tf.get_variable(
                    name=self.weight_dict['Q']['f']['weight'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=self.q_shape,
                        uniform=self.normal_initializer)))
            setattr(
                self,
                self.weight_dict['Q']['f']['bias'],
                tf.get_variable(
                    name=self.weight_dict['Q']['f']['bias'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=[self.q_shape[-1]],
                        uniform=self.normal_initializer)))

            # U
            setattr(
                self,
                self.weight_dict['U']['f']['weight'],
                tf.get_variable(
                    name=self.weight_dict['U']['f']['weight'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=self.u_shape,
                        uniform=self.normal_initializer)))
            setattr(
                self,
                self.weight_dict['U']['f']['bias'],
                tf.get_variable(
                    name=self.weight_dict['U']['f']['bias'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        [self.u_shape[-1]],
                        uniform=self.normal_initializer)))

            # P
            setattr(
                self,
                self.weight_dict['P']['f']['weight'],
                tf.get_variable(
                    name=self.weight_dict['P']['f']['weight'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        self.p_shape,
                        uniform=self.normal_initializer,
                        mask=p_array)))
            setattr(
                self,
                self.weight_dict['P']['f']['bias'],
                tf.get_variable(
                    name=self.weight_dict['P']['f']['bias'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        [self.p_shape[-1]],
                        uniform=self.normal_initializer,
                        mask=None)))

            # T
            setattr(
                self,
                self.weight_dict['T']['f']['weight'],
                tf.get_variable(
                    name=self.weight_dict['T']['f']['weight'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=self.t_shape,
                        uniform=self.normal_initializer,
                        mask=t_array)))
            setattr(
                self,
                self.weight_dict['T']['f']['bias'],
                tf.get_variable(
                    name=self.weight_dict['T']['f']['bias'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=[self.t_shape[-1]],
                        uniform=self.normal_initializer,
                        mask=None)))

        if self.model_version == 'full_with_cell_states':
            # Input
            setattr(
                self,
                self.weight_dict['I']['r']['weight'],
                tf.get_variable(
                    name=self.weight_dict['I']['r']['weight'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=self.i_shape,
                        uniform=self.normal_initializer,
                        mask=None)))
            setattr(
                self,
                self.weight_dict['I']['r']['bias'],
                tf.get_variable(
                    name=self.weight_dict['I']['r']['bias'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=[self.k],
                        uniform=self.normal_initializer,
                        mask=None)))

            # Output
            setattr(
                self,
                self.weight_dict['O']['r']['weight'],
                tf.get_variable(
                    name=self.weight_dict['O']['r']['weight'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=self.o_shape,
                        uniform=self.normal_initializer,
                        mask=None)))
            setattr(
                self,
                self.weight_dict['O']['r']['bias'],
                tf.get_variable(
                    name=self.weight_dict['O']['r']['bias'],
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=[self.k],
                        uniform=self.normal_initializer,
                        mask=None)))

        # Scalar weights
        self.alpha = tf.get_variable(name='alpha', initializer=1.)
        self.tau = tf.get_variable(name='tau', initializer=1.)
        self.eta = tf.get_variable(name='eta', initializer=1.)
        self.omega = tf.get_variable(name='omega', initializer=1.)
        self.eps = tf.get_variable(name='eps', initializer=1.)
        self.gamma = tf.get_variable(name='gamma', initializer=1.)
    def build(self):
        """Run the backprop version of the CCircuit."""
        self.prepare_tensors()
        i0 = tf.constant(0)
        if self.hidden_init == 'identity':
            I = tf.identity(self.X)
            O = tf.identity(self.X)
        elif self.hidden_init == 'random':
            I = initialization.xavier_initializer(
                shape=[self.n, self.h, self.w, self.k],
                uniform=self.normal_initializer,
                mask=None)
            O = initialization.xavier_initializer(
                shape=[self.n, self.h, self.w, self.k],
                uniform=self.normal_initializer,
                mask=None)
        elif self.hidden_init == 'zeros':
            I = tf.zeros_like(self.X)
            O = tf.zeros_like(self.X)
        else:
            raise RuntimeError('Unknown hidden_init: %s' % self.hidden_init)

        if self.store_states:
            store_I = tf.TensorArray(tf.float32, size=self.timesteps)
            store_O = tf.TensorArray(tf.float32, size=self.timesteps)
            elems = [i0, O, I, store_I, store_O]
            returned = tf.while_loop(self.condition,
                                     self.full,
                                     loop_vars=elems,
                                     back_prop=True,
                                     swap_memory=True)

            # Prepare output
            i0, O, I, store_I, store_O = returned
            setattr(self, 'store_I', store_I.stack('store_I'))
            setattr(self, 'store_O', store_O.stack('store_O'))
        else:
            # While loop
            elems = [i0, O, I, tf.constant(0), tf.constant(0)]
            returned = tf.while_loop(self.condition,
                                     self.full,
                                     loop_vars=elems,
                                     back_prop=True,
                                     swap_memory=True)

            # Prepare output
            i0, O, I, _, _ = returned

        if self.return_weights:
            weights = self.gather_tensors(wak='weight')
            tuning = self.gather_tensors(wak='tuning')
            new_tuning = {}
            for k, v in tuning.items():
                key_name = v.name.split('/')[-1].split(':')[0]
                new_tuning[key_name] = v
            weights = dict(weights, **new_tuning)
            activities = self.gather_tensors(wak='activity')
            # Attach weights if using association field
            if self.association_field:
                weights['p_t'] = self.p_r  # Make available for regularization
            if self.store_states:
                weights['store_I'] = store_I
                weights['store_O'] = store_O
            return O, weights, activities
        else:
            if self.store_states:
                return O  # , store_I, store_O
            else:
                return O
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices.
        (np.prod([h, w, k]) / 2) - k params in the surround filter
        """
        # Create FF vars
        if self.include_pooling:
            # Upsample FF layers then hgru layer
            up_filters = list(self.hgru_ids[0].values()) + self.intermediate_ff
            for idx in reversed(range(1, len(up_filters))):
                label = idx - 1
                setattr(
                    self, 'resize_kernel_%s' % label,
                    tf.get_variable(
                        name='%s_resize_kernel_%s' % (self.layer_name, label),
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=self.up_kernel +
                            [up_filters[idx - 1], up_filters[idx]],
                            uniform=self.normal_initializer),
                        trainable=True))
                setattr(
                    self, 'resize_bias_%s' % label,
                    tf.get_variable(name='%s_resize_bias_%s' %
                                    (self.layer_name, label),
                                    dtype=self.dtype,
                                    initializer=tf.ones([up_filters[idx - 1]]),
                                    trainable=True))

        # Create conv filters that supply top-drive
        prev_filters = list(self.hgru_ids[0].values())[0]
        for idx, (ff_filters, ff_kernel, reps) in enumerate(
                zip(self.intermediate_ff, self.intermediate_ks,
                    self.intermediate_repeats)):
            for il in range(reps):
                setattr(
                    self, 'intermediate_kernel_%s_%s' % (idx, il),
                    tf.get_variable(
                        name='%s_ffdrive_kernel_%s_%s' %
                        (self.layer_name, idx, il),
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=ff_kernel + [prev_filters, ff_filters],
                            uniform=self.normal_initializer),
                        trainable=True))
                setattr(
                    self, 'intermediate_bias_%s_%s' % (idx, il),
                    tf.get_variable(name='%s_ffdrive_bias_%s_%s' %
                                    (self.layer_name, idx, il),
                                    dtype=self.dtype,
                                    initializer=tf.ones([ff_filters]),
                                    trainable=True))
                prev_filters = ff_filters

        # Create recurrent vars
        self.symm_k_tag = 'sy' if self.symmetric_weights else 'full'
        self.symm_g_tag = 'sy' if self.symmetric_gate_weights else 'full'
        for idx, hgru_id in enumerate(self.hgru_ids):
            layer, rk = list(hgru_id.items())[0]
            self.g_shape = [self.gate_filter, self.gate_filter, rk, rk]
            self.m_shape = [self.gate_filter, self.gate_filter, rk, rk]
            self.bias_shape = [1, 1, 1, rk]
            with tf.variable_scope('%s_hgru_weights_%s' %
                                   (self.layer_name, layer)):
                setattr(
                    self,
                    '%s_horizontal_kernels_exc_%s' % (self.symm_k_tag, layer),
                    tf.get_variable(
                        name='%s_%s_horizontal_exc' %
                        (self.symm_k_tag, self.layer_name),
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=list(self.h_ext[idx].values())[0] + [rk, rk],
                            uniform=self.normal_initializer),
                        trainable=True))
                setattr(
                    self,
                    '%s_horizontal_kernels_inh_%s' % (self.symm_k_tag, layer),
                    tf.get_variable(
                        name='%s_%s_horizontal_inh' %
                        (self.symm_k_tag, self.layer_name),
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=list(self.h_ext[idx].values())[0] + [rk, rk],
                            uniform=self.normal_initializer),
                        trainable=True))
                setattr(
                    self, '%s_gain_kernels_%s' % (self.symm_g_tag, layer),
                    tf.get_variable(
                        name='%s_%s_gain' % (self.symm_g_tag, self.layer_name),
                        dtype=self.dtype,
                        trainable=True,
                        initializer=initialization.xavier_initializer(
                            shape=self.g_shape,
                            uniform=self.normal_initializer,
                            mask=None)))
                setattr(
                    self, '%s_mix_kernels_%s' % (self.symm_g_tag, layer),
                    tf.get_variable(
                        name='%s_%s_mix' % (self.symm_g_tag, self.layer_name),
                        dtype=self.dtype,
                        trainable=True,
                        initializer=initialization.xavier_initializer(
                            shape=self.m_shape,
                            uniform=self.normal_initializer,
                            mask=None)))

                # Gain bias
                if self.gate_bias_init == 'chronos':
                    bias_init = -tf.log(
                        tf.random_uniform(self.bias_shape,
                                          minval=1,
                                          maxval=self.timesteps - 1))
                else:
                    bias_init = tf.ones(self.bias_shape)
                setattr(
                    self, 'gain_bias_%s' % layer,
                    tf.get_variable(name='%s_gain_bias' % self.layer_name,
                                    dtype=self.dtype,
                                    trainable=True,
                                    initializer=bias_init))
                if self.gate_bias_init == 'chronos':
                    bias_init = -bias_init
                else:
                    bias_init = tf.ones(self.bias_shape)
                setattr(
                    self, 'mix_bias_%s' % layer,
                    tf.get_variable(name='%s_mix_bias' % self.layer_name,
                                    dtype=self.dtype,
                                    trainable=True,
                                    initializer=bias_init))

                # Divisive params
                if self.alpha and not self.lesion_alpha:
                    setattr(
                        self, 'alpha_%s' % layer,
                        tf.get_variable(
                            name='%s_alpha' % self.layer_name,
                            initializer=initialization.xavier_initializer(
                                shape=self.bias_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
                elif self.lesion_alpha:
                    setattr(self, 'alpha_%s' % layer, tf.constant(0.))
                else:
                    setattr(self, 'alpha_%s' % layer, tf.constant(1.))

                if self.mu and not self.lesion_mu:
                    setattr(
                        self, 'mu_%s' % layer,
                        tf.get_variable(
                            name='%s_mu' % self.layer_name,
                            initializer=initialization.xavier_initializer(
                                shape=self.bias_shape,
                                uniform=self.normal_initializer,
                                mask=None)))

                elif self.lesion_mu:
                    setattr(self, 'mu_%s' % layer, tf.constant(0.))
                else:
                    setattr(self, 'mu_%s' % layer, tf.constant(1.))

                if self.gamma:
                    setattr(
                        self, 'gamma_%s' % layer,
                        tf.get_variable(
                            name='%s_gamma' % self.layer_name,
                            initializer=initialization.xavier_initializer(
                                shape=self.bias_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
                else:
                    setattr(self, 'gamma_%s' % layer, tf.constant(1.))

                if self.multiplicative_excitation:
                    if self.lesion_kappa:
                        setattr(self, 'kappa_%s' % layer, tf.constant(0.))
                    else:
                        setattr(
                            self, 'kappa_%s' % layer,
                            tf.get_variable(
                                name='%s_kappa' % self.layer_name,
                                initializer=initialization.xavier_initializer(
                                    shape=self.bias_shape,
                                    uniform=self.normal_initializer,
                                    mask=None)))
                    if self.lesion_omega:
                        setattr(self, 'omega_%s' % layer, tf.constant(0.))
                    else:
                        setattr(
                            self, 'omega_%s' % layer,
                            tf.get_variable(
                                name='%s_omega' % self.layer_name,
                                initializer=initialization.xavier_initializer(
                                    shape=self.bias_shape,
                                    uniform=self.normal_initializer,
                                    mask=None)))
                else:
                    setattr(self, 'kappa_%s' % layer, tf.constant(1.))
                    setattr(self, 'omega_%s' % layer, tf.constant(1.))
                if self.adaptation:
                    setattr(
                        self, 'eta_%s' % layer,
                        tf.get_variable(
                            name='%s_eta' % self.layer_name,
                            initializer=tf.random_uniform([self.timesteps],
                                                          dtype=tf.float32)))
                if self.lesion_omega:
                    setattr(self, 'omega_%s' % layer, tf.constant(0.))
                if self.lesion_kappa:
                    setattr(self, 'kappa_%s' % layer, tf.constant(0.))
                if self.reuse:
                    # Make the batchnorm variables
                    scopes = ['g1_bn', 'g2_bn', 'c1_bn', 'c2_bn']
                    bn_vars = ['moving_mean', 'moving_variance', 'gamma']
                    for s in scopes:
                        with tf.variable_scope(s):
                            for v in bn_vars:
                                tf.get_variable(
                                    trainable=self.param_trainable[v],
                                    name=v,
                                    shape=[rk],
                                    collections=self.param_collections[v],
                                    initializer=self.param_initializer[v])
                    self.param_initializer = None
Example 15
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices.
        (np.prod([h, w, k]) / 2) - k params in the surround filter
        """
        with tf.variable_scope('%s_hgru_weights' % self.layer_name):
            self.horizontal_kernels = tf.get_variable(
                name='%s_horizontal' % self.layer_name,
                dtype=self.dtype,
                initializer=initialization.xavier_initializer(
                    shape=self.h_shape,
                    uniform=self.normal_initializer),
                trainable=self.train)
            self.gain_kernels = tf.get_variable(
                name='%s_gain' % self.layer_name,
                dtype=self.dtype,
                trainable=self.train,
                initializer=initialization.xavier_initializer(
                    shape=self.g_shape,
                    uniform=self.normal_initializer,
                    mask=None))
            self.mix_kernels = tf.get_variable(
                name='%s_mix' % self.layer_name,
                dtype=self.dtype,
                trainable=self.train,
                initializer=initialization.xavier_initializer(
                    shape=self.m_shape,
                    uniform=self.normal_initializer,
                    mask=None))

            # Gain bias
            if self.gate_bias_init == 'chronos':
                bias_init = -tf.log(
                    tf.random_uniform(
                        self.bias_shape, minval=1, maxval=self.timesteps - 1))
            else:
                bias_init = -tf.ones(self.bias_shape)
            self.gain_bias = tf.get_variable(
                name='%s_gain_bias' % self.layer_name,
                dtype=self.dtype,
                trainable=self.train,
                initializer=bias_init)
            if self.gate_bias_init == 'chronos':
                bias_init = -bias_init
            else:
                bias_init = tf.ones(self.bias_shape)
            self.mix_bias = tf.get_variable(
                name='%s_mix_bias' % self.layer_name,
                dtype=self.dtype,
                trainable=self.train,
                initializer=bias_init)

            # Divisive params
            if self.alpha and not self.lesion_alpha:
                self.alpha = tf.get_variable(
                    name='%s_alpha' % self.layer_name,
                    trainable=self.train,
                    initializer=initialization.xavier_initializer(
                        shape=self.bias_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            elif self.lesion_alpha:
                self.alpha = tf.constant(0.)
            else:
                self.alpha = tf.constant(1.)

            if self.mu and not self.lesion_mu:
                self.mu = tf.get_variable(
                    name='%s_mu' % self.layer_name,
                    trainable=self.train,
                    initializer=initialization.xavier_initializer(
                        shape=self.bias_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            elif self.lesion_mu:
                self.mu = tf.constant(0.)
            else:
                self.mu = tf.constant(1.)

            if self.gamma:
                self.gamma = tf.get_variable(
                    name='%s_gamma' % self.layer_name,
                    trainable=self.train,
                    initializer=initialization.xavier_initializer(
                        shape=self.bias_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            else:
                self.gamma = tf.constant(1.)

            if self.multiplicative_excitation:
                if self.lesion_kappa:
                    self.kappa = tf.constant(0.)
                else:
                    self.kappa = tf.get_variable(
                        name='%s_kappa' % self.layer_name,
                        trainable=self.train,
                        initializer=initialization.xavier_initializer(
                            shape=self.bias_shape,
                            uniform=self.normal_initializer,
                            mask=None))
                if self.lesion_omega:
                    self.omega = tf.constant(0.)
                else:
                    self.omega = tf.get_variable(
                        name='%s_omega' % self.layer_name,
                        trainable=self.train,
                        initializer=initialization.xavier_initializer(
                            shape=self.bias_shape,
                            uniform=self.normal_initializer,
                            mask=None))

            else:
                self.kappa = tf.constant(1.)
                self.omega = tf.constant(1.)

            if self.adaptation:
                self.eta = tf.get_variable(
                    trainable=self.train,
                    name='%s_eta' % self.layer_name,
                    shape=[self.timesteps],
                    initializer=tf.random_uniform_initializer())
            if self.lesion_omega:
                self.omega = tf.constant(0.)
            if self.lesion_kappa:
                self.kappa = tf.constant(0.)
Example 16
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices."""
        self.weight_dict = {  # Weights lower/activity upper
            'U': {
                'r': {
                    'weight': 'u_r',
                    'activity': 'U_r'
                }
            },
            'T': {
                'r': {
                    'weight': 't_r',
                    'activity': 'T_r',
                    'tuning': 't_t'
                }
            },
            'P': {
                'r': {
                    'weight': 'p_r',
                    'activity': 'P_r',
                    'tuning': 'p_t'
                }
            },
            'Q': {
                'r': {
                    'weight': 'q_r',
                    'activity': 'Q_r',
                    'tuning': 'q_t'
                }
            },
            'I': {
                'r': {  # Recurrent state
                    'weight': 'i_r',
                    'bias': 'i_b',
                    'activity': 'I_r'
                },
                'f': {  # Recurrent state
                    'weight': 'i_f',
                    'activity': 'I_f'
                },
            },
            'O': {
                'r': {  # Recurrent state
                    'weight': 'o_r',
                    'bias': 'o_b',
                    'activity': 'O_r'
                },
                'f': {  # Recurrent state
                    'weight': 'o_f',
                    'activity': 'O_f'
                },
            },
            'xi': {
                'r': {  # Recurrent state
                    'weight': 'xi',
                }
            },
            'alpha': {
                'r': {  # Recurrent state
                    'weight': 'alpha',
                }
            },
            'beta': {
                'r': {  # Recurrent state
                    'weight': 'beta',
                }
            },
            'mu': {
                'r': {  # Recurrent state
                    'weight': 'mu',
                }
            },
            'nu': {
                'r': {  # Recurrent state
                    'weight': 'nu',
                }
            },
            'zeta': {
                'r': {  # Recurrent state
                    'weight': 'zeta',
                }
            },
            'gamma': {
                'r': {  # Recurrent state
                    'weight': 'gamma',
                }
            },
            'delta': {
                'r': {  # Recurrent state
                    'weight': 'delta',
                }
            }
        }

        # tuned summation: pooling in h, w dimensions
        #############################################
        q_array = np.ones(self.q_shape) / np.prod(self.q_shape)
        if 'Q' in self.lesions:
            q_array = np.zeros_like(q_array).astype(np.float32)
            print('Lesioning CRF excitation.')
        setattr(
            self, self.weight_dict['Q']['r']['weight'],
            tf.get_variable(name=self.weight_dict['Q']['r']['weight'],
                            dtype=self.dtype,
                            initializer=q_array.astype(np.float32),
                            trainable=False))

        # untuned suppression: reduction across feature axis
        ####################################################
        u_array = np.ones(self.u_shape) / np.prod(self.u_shape)
        if 'U' in self.lesions:
            u_array = np.zeros_like(u_array).astype(np.float32)
            print('Lesioning CRF inhibition.')
        setattr(
            self, self.weight_dict['U']['r']['weight'],
            tf.get_variable(name=self.weight_dict['U']['r']['weight'],
                            dtype=self.dtype,
                            initializer=u_array.astype(np.float32),
                            trainable=False))

        # weakly tuned summation: pooling in h, w dimensions
        #############################################
        p_array = np.ones(self.p_shape)
        # Optionally punch out the CRF so that only the surround contributes
        if self.exclude_CRF:
            crf_start = self.SSN // 2 - py_utils.ifloor(self.SRF / 2.0)
            crf_end = self.SSF // 2 + py_utils.iceil(self.SSN / 2.0)
            p_array[crf_start:crf_end, crf_start:crf_end, :, :] = 0.0  # exclude CRF!

        p_array = p_array / p_array.sum()
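
        # Worked sketch of the punch-out above, assuming hypothetical extents
        # SRF=3 (CRF) and SSN=9 (near surround): the kernel centre is zeroed
        # so only surround locations contribute to the pool.
        _srf, _ssn = 3, 9
        _p = np.ones([_ssn, _ssn, 1, 1])
        _lo = _ssn // 2 - _srf // 2          # 3
        _hi = _ssn // 2 + (_srf + 1) // 2    # 6, i.e. ceil(srf / 2) past centre
        _p[_lo:_hi, _lo:_hi, :, :] = 0.0
        assert _p[_ssn // 2, _ssn // 2, 0, 0] == 0.0  # CRF removed
        assert _p[0, 0, 0, 0] == 1.0                  # surround kept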

        if 'P' in self.lesions:
            print('Lesioning near eCRF.')
            p_array = np.zeros_like(p_array).astype(np.float32)

        # Association field is fully learnable
        if self.association_field and 'P' not in self.lesions:
            setattr(
                self, self.weight_dict['P']['r']['weight'],
                tf.get_variable(name=self.weight_dict['P']['r']['weight'],
                                dtype=self.dtype,
                                initializer=initialization.xavier_initializer(
                                    shape=self.p_shape,
                                    uniform=self.normal_initializer),
                                trainable=True))
        else:
            setattr(
                self, self.weight_dict['P']['r']['weight'],
                tf.get_variable(name=self.weight_dict['P']['r']['weight'],
                                dtype=self.dtype,
                                initializer=p_array.astype(np.float32),
                                trainable=False))

        # weakly tuned suppression: pooling in h, w dimensions
        ###############################################
        t_array = np.ones(self.t_shape)
        # Optionally punch out the CRF and near surround
        if self.exclude_CRF:
            near_start = self.SSF // 2 - py_utils.ifloor(self.SSN / 2.0)
            near_end = self.SSF // 2 + py_utils.iceil(self.SSN / 2.0)
            # exclude near surround!
            t_array[near_start:near_end, near_start:near_end, :, :] = 0.0

        t_array = t_array / t_array.sum()
        if 'T' in self.lesions:
            print('Lesioning far eCRF.')
            t_array = np.zeros_like(t_array).astype(np.float32)

        # full_far_eCRF makes the far eCRF kernel fully learnable
        # (Xavier-initialized); it should be set to True for now.
        if self.full_far_eCRF:
            setattr(
                self, self.weight_dict['T']['r']['weight'],
                tf.get_variable(name=self.weight_dict['T']['r']['weight'],
                                dtype=self.dtype,
                                initializer=initialization.xavier_initializer(
                                    shape=self.t_shape,  # far eCRF (T) shape
                                    uniform=self.normal_initializer),
                                trainable=True))
        else:
            setattr(
                self, self.weight_dict['T']['r']['weight'],
                tf.get_variable(name=self.weight_dict['T']['r']['weight'],
                                dtype=self.dtype,
                                initializer=t_array.astype(np.float32),
                                trainable=False))

        # Connectivity tensors -- Q/P/T
        if 'Q' in self.lesions:
            print('Lesioning CRF excitation connectivity.')
            setattr(
                self, self.weight_dict['Q']['r']['tuning'],
                tf.get_variable(name=self.weight_dict['Q']['r']['tuning'],
                                dtype=self.dtype,
                                trainable=False,
                                initializer=np.zeros(self.tuning_shape).astype(
                                    np.float32)))
        else:
            setattr(
                self, self.weight_dict['Q']['r']['tuning'],
                tf.get_variable(name=self.weight_dict['Q']['r']['tuning'],
                                dtype=self.dtype,
                                trainable=True,
                                initializer=initialization.xavier_initializer(
                                    shape=self.tuning_shape,
                                    uniform=self.normal_initializer,
                                    mask=None)))
        if not self.association_field:
            # Need a tuning tensor for near surround
            if 'P' in self.lesions:
                print('Lesioning near eCRF connectivity.')
                setattr(
                    self, self.weight_dict['P']['r']['tuning'],
                    tf.get_variable(name=self.weight_dict['P']['r']['tuning'],
                                    dtype=self.dtype,
                                    trainable=False,
                                    initializer=np.zeros(
                                        self.tuning_shape).astype(np.float32)))
            else:
                setattr(
                    self, self.weight_dict['P']['r']['tuning'],
                    tf.get_variable(
                        name=self.weight_dict['P']['r']['tuning'],
                        dtype=self.dtype,
                        trainable=True,
                        initializer=initialization.xavier_initializer(
                            shape=self.tuning_shape,
                            uniform=self.normal_initializer,
                            mask=None)))

        # Again, full_far_eCRF should be set to True for now.
        if not self.full_far_eCRF:
            # Need a tuning tensor for near surround
            if 'T' in self.lesions:
                print('Lesioning far eCRF connectivity.')
                setattr(
                    self, self.weight_dict['T']['r']['tuning'],
                    tf.get_variable(name=self.weight_dict['T']['r']['tuning'],
                                    dtype=self.dtype,
                                    trainable=False,
                                    initializer=np.zeros(
                                        self.tuning_shape).astype(np.float32)))
        else:
            setattr(
                self, self.weight_dict['T']['r']['tuning'],
                tf.get_variable(name=self.weight_dict['T']['r']['tuning'],
                                dtype=self.dtype,
                                trainable=True,
                                initializer=initialization.xavier_initializer(
                                    shape=self.tuning_shape,
                                    uniform=self.normal_initializer,
                                    mask=None)))

        # Input
        setattr(
            self, self.weight_dict['I']['r']['weight'],
            tf.get_variable(name=self.weight_dict['I']['r']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.i_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['I']['f']['weight'],
            tf.get_variable(name=self.weight_dict['I']['f']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.i_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['I']['r']['bias'],
            tf.get_variable(name=self.weight_dict['I']['r']['bias'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=tf.ones(self.bias_shape)))

        # Output
        setattr(
            self, self.weight_dict['O']['r']['weight'],
            tf.get_variable(name=self.weight_dict['O']['r']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.o_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['O']['f']['weight'],
            tf.get_variable(name=self.weight_dict['O']['f']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.o_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['O']['r']['bias'],
            tf.get_variable(name=self.weight_dict['O']['r']['bias'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=tf.ones(self.bias_shape)))

        # Vector weights
        w_array = np.ones([1, 1, 1, self.k]).astype(np.float32)
        b_array = np.zeros([1, 1, 1, self.k]).astype(np.float32)
        self.xi = tf.get_variable(name='xi', initializer=w_array)
        self.alpha = tf.get_variable(name='alpha', initializer=w_array)
        self.beta = tf.get_variable(name='beta', initializer=w_array)
        self.mu = tf.get_variable(name='mu', initializer=b_array)
        self.nu = tf.get_variable(name='nu', initializer=b_array)
        self.zeta = tf.get_variable(name='zeta', initializer=w_array)
        self.gamma = tf.get_variable(name='gamma', initializer=w_array)
        self.delta = tf.get_variable(name='delta', initializer=w_array)
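
        # The [1, 1, 1, k] shapes above broadcast against [n, h, w, k]
        # activity tensors: one learnable gain (ones-initialized) or bias
        # (zeros-initialized) per feature channel. Numpy sketch with a
        # hypothetical activity tensor:
        _act = np.ones([2, 8, 8, self.k])
        assert (w_array * _act + b_array).shape == _act.shape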
Esempio n. 17
    def prepare_tensors(self):
        """
        """

        # recurrent kernels
        setattr(
            self, 'caps_filter',
            tf.get_variable(name='%s_caps_filter' % (self.name),
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=[self.h_ext, self.h_ext] +
                                [self.f * self.c, self.b * self.f * self.c],
                                uniform=self.normal_initializer),
                            trainable=True))
        if self.use_independent_labels_filter:
            setattr(
                self, 'labels_filter',
                tf.get_variable(name='%s_labels_filter' % (self.name),
                                dtype=self.dtype,
                                initializer=initialization.xavier_initializer(
                                    shape=[self.h_ext, self.h_ext] +
                                    [self.f, self.b * self.f],
                                    uniform=self.normal_initializer),
                                trainable=True))
        else:
            setattr(self, 'labels_filter',
                    helpers.caps2scalar_filter(self.caps_filter, self.c))

        # label update variables
        if self.labels_update_mode == 'pushpull':
            setattr(
                self, 'pushpull_scale',
                tf.get_variable(name='%s_pushpull_scale' % (self.name),
                                dtype=self.dtype,
                                initializer=initialization.xavier_initializer(
                                    shape=[1, 1, 1, self.f],
                                    uniform=self.normal_initializer),
                                trainable=True))
            setattr(
                self, 'pushpull_bias',
                tf.get_variable(name='%s_pushpull_bias' % (self.name),
                                dtype=self.dtype,
                                initializer=initialization.xavier_initializer(
                                    shape=[1, 1, 1, self.f],
                                    uniform=self.normal_initializer),
                                trainable=True))
            self.sensitivity_const = None
        elif self.labels_update_mode == 'average':
            setattr(
                self, 'sensitivity_const',
                tf.get_variable(name='%s_sensitivity_const' % (self.name),
                                dtype=self.dtype,
                                shape=[1, 1, 1, self.f],
                                initializer=tf.constant_initializer(
                                    [0.5], dtype=self.dtype),
                                trainable=True))
            self.pushpull_scale = None
            self.pushpull_bias = None

        # label gate variables
        if self.use_labels_gate:
            setattr(
                self, 'labels_gate_scale',
                tf.get_variable(name='%s_labels_gate_scale' % (self.name),
                                dtype=self.dtype,
                                initializer=initialization.xavier_initializer(
                                    shape=[1, 1, 1, self.f],
                                    uniform=self.normal_initializer),
                                trainable=True))
            setattr(
                self, 'labels_gate_bias',
                tf.get_variable(name='%s_labels_gate_bias' % (self.name),
                                dtype=self.dtype,
                                initializer=initialization.xavier_initializer(
                                    shape=[1, 1, 1, self.f],
                                    uniform=self.normal_initializer),
                                trainable=True))
        else:
            self.labels_gate_scale = None
            self.labels_gate_bias = None

        # Auxiliary variables
        setattr(
            self, 'tolerance_const',
            tf.get_variable(name='%s_tolerance_const' % (self.name),
                            dtype=self.dtype,
                            shape=[1, 1, 1, self.f],
                            initializer=tf.constant_initializer(
                                [0.], dtype=self.dtype),
                            trainable=True))
        setattr(
            self, 'decay_const',
            tf.get_variable(name='%s_decay_const' % (self.name),
                            dtype=self.dtype,
                            shape=[1, 1, 1, self.f],
                            initializer=tf.constant_initializer(
                                [0.0], dtype=self.dtype),
                            trainable=True))
        import numpy as np
        fixed_labels_mask = np.zeros([1, 1, 1, self.f])
        if self.fixed_label_ind is not None:
            for idx in self.fixed_label_ind:
                fixed_labels_mask[0, 0, 0, idx] = 1.
        self.fixed_labels_mask = tf.constant(fixed_labels_mask,
                                             dtype=self.dtype)
Esempio n. 18
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices.
        (np.prod([h, w, k]) / 2) - k params in the surround filter
        """
        with tf.variable_scope('%s_hgru_weights' % self.layer_name):
            self.horizontal_kernels = tf.get_variable(
                name='%s_horizontal' % self.layer_name,
                dtype=self.dtype,
                initializer=initialization.xavier_initializer(
                    shape=self.h_shape, uniform=self.normal_initializer),
                trainable=True)
            self.h_bias = tf.get_variable(
                name='%s_h_bias' % self.layer_name,
                initializer=initialization.xavier_initializer(
                    shape=self.bias_shape,
                    uniform=self.normal_initializer,
                    mask=None))
            self.f_kernels = tf.get_variable(
                name='%s_f' % self.layer_name,
                dtype=self.dtype,
                trainable=True,
                initializer=initialization.xavier_initializer(
                    shape=self.f_shape,
                    uniform=self.normal_initializer,
                    mask=None))
            self.i_kernels = tf.get_variable(
                name='%s_i' % self.layer_name,
                dtype=self.dtype,
                trainable=True,
                initializer=initialization.xavier_initializer(
                    shape=self.i_shape,
                    uniform=self.normal_initializer,
                    mask=None))
            self.o_kernels = tf.get_variable(
                name='%s_o' % self.layer_name,
                dtype=self.dtype,
                trainable=True,
                initializer=initialization.xavier_initializer(
                    shape=self.i_shape,
                    uniform=self.normal_initializer,
                    mask=None))

            # Gain bias
            bias_init = tf.ones(self.bias_shape)
            self.f_bias = tf.get_variable(name='%s_f_bias' % self.layer_name,
                                          dtype=self.dtype,
                                          trainable=True,
                                          initializer=bias_init)
            self.i_bias = tf.get_variable(name='%s_i_bias' % self.layer_name,
                                          dtype=self.dtype,
                                          trainable=True,
                                          initializer=bias_init)
            self.o_bias = tf.get_variable(name='%s_o_bias' % self.layer_name,
                                          dtype=self.dtype,
                                          trainable=True,
                                          initializer=bias_init)

            # Divisive params
            if self.alpha and not self.lesion_alpha:
                self.alpha = tf.get_variable(
                    name='%s_alpha' % self.layer_name,
                    initializer=initialization.xavier_initializer(
                        shape=self.bias_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            elif self.lesion_alpha:
                self.alpha = tf.constant(0.)
            else:
                self.alpha = tf.constant(1.)

            if self.mu and not self.lesion_mu:
                self.mu = tf.get_variable(
                    name='%s_mu' % self.layer_name,
                    initializer=initialization.xavier_initializer(
                        shape=self.bias_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            elif self.lesion_mu:
                self.mu = tf.constant(0.)
            else:
                self.mu = tf.constant(1.)

            # if self.gamma:
            #     self.gamma = tf.get_variable(
            #         name='%s_gamma' % self.layer_name,
            #         initializer=initialization.xavier_initializer(
            #             shape=self.bias_shape,
            #             uniform=self.normal_initializer,
            #             mask=None))
            # else:
            #     self.gamma = tf.constant(1.)

            if self.multiplicative_excitation:
                if self.lesion_kappa:
                    self.kappa = tf.constant(0.)
                else:
                    self.kappa = tf.get_variable(
                        name='%s_kappa' % self.layer_name,
                        initializer=initialization.xavier_initializer(
                            shape=self.bias_shape,
                            uniform=self.normal_initializer,
                            mask=None))
                if self.lesion_omega:
                    self.omega = tf.constant(0.)
                else:
                    self.omega = tf.get_variable(
                        name='%s_omega' % self.layer_name,
                        initializer=initialization.xavier_initializer(
                            shape=self.bias_shape,
                            uniform=self.normal_initializer,
                            mask=None))
            else:
                self.kappa = tf.constant(1.)
                self.omega = tf.constant(1.)

            if self.adapation:
                self.eta = tf.get_variable(name='%s_eta' % self.layer_name,
                                           initializer=tf.ones(
                                               self.timesteps,
                                               dtype=tf.float32))
            if self.lesion_omega:
                self.omega = tf.constant(0.)
            if self.lesion_kappa:
                self.kappa = tf.constant(0.)
Esempio n. 19
    def prepare_tensors(self):

        # HGRU KERNELS
        self.hgru0.prepare_tensors()
        self.hgru_td0.prepare_tensors()

        # FEEDFORWARD KERNELS
        lower_feats = self.hgru_h2_k
        for idx, (higher_feats,
                  ff_dhw) in enumerate(zip(self.ff_conv_k, self.ff_conv_fsiz)):
            with tf.variable_scope(self.var_scope + '/ff_%s' % idx):
                setattr(
                    self, 'ff_%s_weights' % idx,
                    tf.get_variable(
                        name='weights',
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=ff_dhw + [lower_feats, higher_feats],
                            dtype=self.dtype,
                            uniform=True),
                        trainable=True))
                lower_feats = higher_feats

        # FEEDBACK KERNELS
        lower_feats = self.in_k
        if not self.share_ff_td_kernels:
            for idx, (higher_feats, fb_dhw) in enumerate(
                    zip(self.fb_conv_k, self.fb_conv_fsiz)):
                with tf.variable_scope(self.var_scope + '/fb_%s' % idx):
                    setattr(
                        self, 'fb_%s_weights' % idx,
                        tf.get_variable(
                            name='weights',
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=fb_dhw + [lower_feats, higher_feats],
                                dtype=self.dtype,
                                uniform=True),
                            trainable=True))
                lower_feats = higher_feats
        else:
            higher_feats = self.fb_conv_k[0]
            fb_dhw = self.fb_conv_fsiz[0]
            idx = 0
            with tf.variable_scope(self.var_scope + '/fb_%s' % idx):
                setattr(
                    self, 'fb_%s_weights' % idx,
                    tf.get_variable(
                        name='weights',
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=fb_dhw + [lower_feats, higher_feats],
                            dtype=self.dtype,
                            uniform=True),
                        trainable=True))

        # Count trainable parameters, split into fGRU vs. feedforward/feedback
        fgru = 0
        ff_fb = 0
        for x in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                   scope=self.var_scope):
            prod = np.prod(x.get_shape().as_list())
            if 'fgru' in x.name:
                fgru += prod
            else:
                ff_fb += prod
        print('>>>>>>>>>>>>>>>>>>>>>>TRAINABLE VARS: ' + 'fgrus(' +
              str(fgru) + ') ff&fb(' + str(ff_fb) + ')')
        print('>>>>>>>>>>>>>>>>>>>>>>TRAINABLE VARS: ' + 'total(' +
              str(fgru + ff_fb) + ')')
Esempio n. 20
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices."""
        self.weight_dict = {  # Weights lower/activity upper
            'U': {
                'r': {
                    'weight': 'u_r',
                    'activity': 'U_r'
                }
            },
            'T': {
                'r': {
                    'weight': 't_r',
                    'activity': 'T_r',
                    'tuning': 't_t'
                }
            },
            'P': {
                'r': {
                    'weight': 'p_r',
                    'activity': 'P_r',
                    'tuning': 'p_t'
                }
            },
            'Q': {
                'r': {
                    'weight': 'q_r',
                    'activity': 'Q_r',
                    'tuning': 'q_t'
                }
            },
            'I': {
                'r': {  # Recurrent state
                    'weight': 'i_r',
                    'activity': 'I_r'
                },
                'f': {  # Recurrent state
                    'weight': 'i_f',
                    'activity': 'I_f'
                },
            },
            'O': {
                'r': {  # Recurrent state
                    'weight': 'o_r',
                    'activity': 'O_r'
                },
                'f': {  # Recurrent state
                    'weight': 'o_f',
                    'activity': 'O_f'
                },
            },
            'xi': {
                'r': {  # Recurrent state
                    'weight': 'xi',
                }
            },
            'alpha': {
                'r': {  # Recurrent state
                    'weight': 'alpha',
                }
            },
            'beta': {
                'r': {  # Recurrent state
                    'weight': 'beta',
                }
            },
            'mu': {
                'r': {  # Recurrent state
                    'weight': 'mu',
                }
            },
            'nu': {
                'r': {  # Recurrent state
                    'weight': 'nu',
                }
            },
            'zeta': {
                'r': {  # Recurrent state
                    'weight': 'zeta',
                }
            },
            'gamma': {
                'r': {  # Recurrent state
                    'weight': 'gamma',
                }
            },
            'delta': {
                'r': {  # Recurrent state
                    'weight': 'delta',
                }
            }
        }

        # tuned summation: pooling in h, w dimensions
        #############################################
        q_array = np.ones(self.q_shape) / np.prod(self.q_shape)
        setattr(
            self, self.weight_dict['Q']['r']['weight'],
            tf.get_variable(name=self.weight_dict['Q']['r']['weight'],
                            dtype=self.dtype,
                            initializer=q_array.astype(np.float32),
                            trainable=False))

        # untuned suppression: reduction across feature axis
        ####################################################
        u_array = np.ones(self.u_shape) / np.prod(self.u_shape)
        setattr(
            self, self.weight_dict['U']['r']['weight'],
            tf.get_variable(name=self.weight_dict['U']['r']['weight'],
                            dtype=self.dtype,
                            initializer=u_array.astype(np.float32),
                            trainable=False))

        # weakly tuned summation: pooling in h, w dimensions
        #############################################
        p_array = np.ones(self.p_shape)
        crf_start = self.SSN // 2 - py_utils.ifloor(self.SRF / 2.0)
        crf_end = self.SSN // 2 + py_utils.iceil(self.SRF / 2.0)
        p_array[crf_start:crf_end, crf_start:crf_end, :, :] = 0.0  # exclude CRF!
        p_array = p_array / p_array.sum()

        setattr(
            self, self.weight_dict['P']['r']['weight'],
            tf.get_variable(name=self.weight_dict['P']['r']['weight'],
                            dtype=self.dtype,
                            initializer=p_array.astype(np.float32),
                            trainable=False))

        # weakly tuned suppression: pooling in h, w dimensions
        ###############################################
        t_array = np.ones(self.t_shape)
        near_start = self.SSF // 2 - py_utils.ifloor(self.SSN / 2.0)
        near_end = self.SSF // 2 + py_utils.iceil(self.SSN / 2.0)
        # exclude near surround!
        t_array[near_start:near_end, near_start:near_end, :, :] = 0.0
        t_array = t_array / t_array.sum()
        setattr(
            self, self.weight_dict['T']['r']['weight'],
            tf.get_variable(name=self.weight_dict['T']['r']['weight'],
                            dtype=self.dtype,
                            initializer=t_array.astype(np.float32),
                            trainable=False))

        # Connectivity tensors -- Q/P/T
        setattr(
            self, self.weight_dict['Q']['r']['tuning'],
            tf.get_variable(name=self.weight_dict['Q']['r']['tuning'],
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.tuning_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['P']['r']['tuning'],
            tf.get_variable(name=self.weight_dict['P']['r']['tuning'],
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.tuning_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['T']['r']['tuning'],
            tf.get_variable(name=self.weight_dict['T']['r']['tuning'],
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.tuning_shape,
                                uniform=self.normal_initializer,
                                mask=None)))

        # Input
        setattr(
            self, self.weight_dict['I']['r']['weight'],
            tf.get_variable(name=self.weight_dict['I']['r']['weight'],
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.i_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['I']['f']['weight'],
            tf.get_variable(name=self.weight_dict['I']['f']['weight'],
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.i_shape,
                                uniform=self.normal_initializer,
                                mask=None)))

        # Output
        setattr(
            self, self.weight_dict['O']['r']['weight'],
            tf.get_variable(name=self.weight_dict['O']['r']['weight'],
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.o_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['O']['f']['weight'],
            tf.get_variable(name=self.weight_dict['O']['f']['weight'],
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.o_shape,
                                uniform=self.normal_initializer,
                                mask=None)))

        # Vector weights
        w_array = np.ones([1, 1, 1, self.k]).astype(np.float32)
        b_array = np.zeros([1, 1, 1, self.k]).astype(np.float32)
        self.xi = tf.get_variable(name='xi', initializer=w_array)
        self.alpha = tf.get_variable(name='alpha', initializer=w_array)
        self.beta = tf.get_variable(name='beta', initializer=w_array)
        self.mu = tf.get_variable(name='mu', initializer=b_array)
        self.nu = tf.get_variable(name='nu', initializer=b_array)
        self.zeta = tf.get_variable(name='zeta', initializer=w_array)
        self.gamma = tf.get_variable(name='gamma', initializer=w_array)
        self.delta = tf.get_variable(name='delta', initializer=w_array)
Esempio n. 21
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices."""
        self.weight_dict = {  # Weights lower/activity upper
            'U': {
                'r': {
                    'weight': 'u_r',
                    'activity': 'U_r'
                }
            },
            'T': {
                'r': {
                    'weight': 't_r',
                    'activity': 'T_r',
                    'tuning': 't_t'
                }
            },
            'P': {
                'r': {
                    'weight': 'p_r',
                    'activity': 'P_r',
                    'tuning': 'p_t'
                }
            },
            'Q': {
                'r': {
                    'weight': 'q_r',
                    'activity': 'Q_r',
                    'tuning': 'q_t'
                }
            },
            'I': {
                'r': {  # Recurrent state
                    'weight': 'i_r',
                    'bias': 'i_b',
                    'activity': 'I_r'
                },
                'f': {  # Recurrent state
                    'weight': 'i_f',
                    'activity': 'I_f'
                },
            },
            'O': {
                'r': {  # Recurrent state
                    'weight': 'o_r',
                    'bias': 'o_b',
                    'activity': 'O_r'
                },
                'f': {  # Recurrent state
                    'weight': 'o_f',
                    'activity': 'O_f'
                },
            },
            'xi': {
                'r': {  # Recurrent state
                    'weight': 'xi',
                }
            },
            'alpha': {
                'r': {  # Recurrent state
                    'weight': 'alpha',
                }
            },
            'beta': {
                'r': {  # Recurrent state
                    'weight': 'beta',
                }
            },
            'mu': {
                'r': {  # Recurrent state
                    'weight': 'mu',
                }
            },
            'nu': {
                'r': {  # Recurrent state
                    'weight': 'nu',
                }
            },
            'zeta': {
                'r': {  # Recurrent state
                    'weight': 'zeta',
                }
            },
            'gamma': {
                'r': {  # Recurrent state
                    'weight': 'gamma',
                }
            },
            'delta': {
                'r': {  # Recurrent state
                    'weight': 'delta',
                }
            }
        }

        # tuned summation: pooling in h, w dimensions
        #############################################
        q_array = np.ones(self.q_shape) / np.prod(self.q_shape)
        if 'Q' in self.lesions:
            q_array = np.zeros_like(q_array).astype(np.float32)
            print('Lesioning CRF excitation.')
        setattr(
            self, self.weight_dict['Q']['r']['weight'],
            tf.get_variable(name=self.weight_dict['Q']['r']['weight'],
                            dtype=self.dtype,
                            initializer=q_array.astype(np.float32),
                            trainable=False))

        # untuned suppression: reduction across feature axis
        ####################################################
        u_array = np.ones(self.u_shape) / np.prod(self.u_shape)
        if 'U' in self.lesions:
            u_array = np.zeros_like(u_array).astype(np.float32)
            print('Lesioning CRF inhibition.')
        setattr(
            self, self.weight_dict['U']['r']['weight'],
            tf.get_variable(name=self.weight_dict['U']['r']['weight'],
                            dtype=self.dtype,
                            initializer=u_array.astype(np.float32),
                            trainable=False))

        # weakly tuned summation: pooling in h, w dimensions
        #############################################
        p_array = np.ones(self.p_shape)
        p_array = p_array / p_array.sum()
        if 'P' in self.lesions:
            print('Lesioning near eCRF.')
            p_array = np.zeros_like(p_array).astype(np.float32)

        # Association field is fully learnable
        if self.association_field and 'P' not in self.lesions:
            setattr(
                self,
                self.weight_dict['P']['r']['weight'],
                self.symmetric_weights(
                    tf.get_variable(
                        name=self.weight_dict['P']['r']['weight'] +
                        '_non_symm',
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=self.p_shape,
                            uniform=self.normal_initializer),
                        trainable=True),
                    self.weight_dict['P']['r']['weight']))
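            # self.symmetric_weights is defined elsewhere in this class and is
            # not shown in this snippet. A common way to impose the symmetry
            # its name suggests (a sketch, not the original implementation):
            #   def symmetric_weights(self, w, name):
            #       # tie in/out channel pairs of a [h, w, in, out] kernel
            #       return tf.identity(
            #           0.5 * (w + tf.transpose(w, [0, 1, 3, 2])), name=name)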
        else:
            setattr(
                self, self.weight_dict['P']['r']['weight'],
                tf.get_variable(name=self.weight_dict['P']['r']['weight'],
                                dtype=self.dtype,
                                initializer=p_array.astype(np.float32),
                                trainable=False))

        # weakly tuned suppression: pooling in h, w dimensions
        ###############################################
        t_array = np.ones(self.t_shape)
        t_array = t_array / t_array.sum()
        if 'T' in self.lesions:
            print('Lesioning far eCRF.')
            t_array = np.zeros_like(t_array).astype(np.float32)
        setattr(
            self, self.weight_dict['T']['r']['weight'],
            tf.get_variable(name=self.weight_dict['T']['r']['weight'],
                            dtype=self.dtype,
                            initializer=t_array.astype(np.float32),
                            trainable=False))

        # Connectivity tensors -- Q/P/T
        if 'Q' in self.lesions:
            print('Lesioning CRF excitation connectivity.')
            setattr(
                self, self.weight_dict['Q']['r']['tuning'],
                tf.get_variable(name=self.weight_dict['Q']['r']['tuning'],
                                dtype=self.dtype,
                                trainable=False,
                                initializer=np.zeros(self.tuning_shape).astype(
                                    np.float32)))
        else:
            setattr(
                self, self.weight_dict['Q']['r']['tuning'],
                self.symmetric_weights(
                    tf.get_variable(
                        name=self.weight_dict['Q']['r']['tuning'] +
                        '_non_symm',
                        dtype=self.dtype,
                        trainable=True,
                        initializer=initialization.xavier_initializer(
                            shape=self.tuning_shape,
                            uniform=self.normal_initializer,
                            mask=None)), self.weight_dict['Q']['r']['tuning']))
        if not self.association_field:
            # Need a tuning tensor for near surround
            if 'P' in self.lesions:
                print('Lesioning near eCRF connectivity.')
                setattr(
                    self, self.weight_dict['P']['r']['tuning'],
                    tf.get_variable(name=self.weight_dict['P']['r']['tuning'],
                                    dtype=self.dtype,
                                    trainable=False,
                                    initializer=np.zeros(
                                        self.tuning_shape).astype(np.float32)))
            else:
                setattr(
                    self, self.weight_dict['P']['r']['tuning'],
                    self.symmetric_weights(
                        tf.get_variable(
                            name=self.weight_dict['P']['r']['tuning'] +
                            '_non_symm',
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.tuning_shape,
                                uniform=self.normal_initializer,
                                mask=None)),
                        self.weight_dict['P']['r']['tuning']))
        if 'T' in self.lesions:
            print('Lesioning far eCRF connectivity.')
            setattr(
                self, self.weight_dict['T']['r']['tuning'],
                tf.get_variable(name=self.weight_dict['T']['r']['tuning'],
                                dtype=self.dtype,
                                trainable=False,
                                initializer=np.zeros(self.tuning_shape).astype(
                                    np.float32)))
        else:
            setattr(
                self, self.weight_dict['T']['r']['tuning'],
                self.symmetric_weights(
                    tf.get_variable(
                        name=self.weight_dict['T']['r']['tuning'] +
                        '_non_symm',
                        dtype=self.dtype,
                        trainable=True,
                        initializer=initialization.xavier_initializer(
                            shape=self.tuning_shape,
                            uniform=self.normal_initializer,
                            mask=None)), self.weight_dict['T']['r']['tuning']))

        # Input
        setattr(
            self, self.weight_dict['I']['r']['weight'],
            tf.get_variable(name=self.weight_dict['I']['r']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.i_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['I']['f']['weight'],
            tf.get_variable(name=self.weight_dict['I']['f']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.i_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['I']['r']['bias'],
            tf.get_variable(name=self.weight_dict['I']['r']['bias'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=tf.ones(self.bias_shape)))

        # Output
        setattr(
            self, self.weight_dict['O']['r']['weight'],
            tf.get_variable(name=self.weight_dict['O']['r']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.o_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['O']['f']['weight'],
            tf.get_variable(name=self.weight_dict['O']['f']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.o_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['O']['r']['bias'],
            tf.get_variable(name=self.weight_dict['O']['r']['bias'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=tf.ones(self.bias_shape)))

        # Vector weights
        w_array = np.ones([1, 1, 1, self.k]).astype(np.float32)
        b_array = np.zeros([1, 1, 1, self.k]).astype(np.float32)
        self.xi = tf.get_variable(name='xi', initializer=w_array)
        self.alpha = tf.get_variable(name='alpha', initializer=w_array)
        self.beta = tf.get_variable(name='beta', initializer=w_array)
        self.mu = tf.get_variable(name='mu', initializer=b_array)
        self.nu = tf.get_variable(name='nu', initializer=b_array)
        self.zeta = tf.get_variable(name='zeta', initializer=w_array)
        self.gamma = tf.get_variable(name='gamma', initializer=w_array)
        self.delta = tf.get_variable(name='delta', initializer=w_array)
Esempio n. 22
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices."""
        self.weight_dict = {  # Weights lower/activity upper
            'U': {
                'r': {
                    'weight': 'u_r',
                    'activity': 'U_r'
                }
            },
            'P': {
                'r': {
                    'weight': 'p_r',
                    'activity': 'P_r',
                    'tuning': 'p_t'
                }
            },
            'Q': {
                'r': {
                    'weight': 'q_r',
                    'activity': 'Q_r',
                    'tuning': 'q_t'
                }
            },
            'I': {
                'r': {  # Recurrent state
                    'weight': 'i_r',
                    'bias': 'i_b',
                    'activity': 'I_r'
                },
                'f': {  # Recurrent state
                    'weight': 'i_f',
                    'activity': 'I_f'
                },
            },
            'O': {
                'r': {  # Recurrent state
                    'weight': 'o_r',
                    'bias': 'o_b',
                    'activity': 'O_r'
                },
                'f': {  # Recurrent state
                    'weight': 'o_f',
                    'activity': 'O_f'
                },
            },
            'xi': {
                'r': {  # Recurrent state
                    'weight': 'xi',
                }
            },
            'alpha': {
                'r': {  # Recurrent state
                    'weight': 'alpha',
                }
            },
            'beta': {
                'r': {  # Recurrent state
                    'weight': 'beta',
                }
            },
            'mu': {
                'r': {  # Recurrent state
                    'weight': 'mu',
                }
            },
            'nu': {
                'r': {  # Recurrent state
                    'weight': 'nu',
                }
            },
            'zeta': {
                'r': {  # Recurrent state
                    'weight': 'zeta',
                }
            },
            'gamma': {
                'r': {  # Recurrent state
                    'weight': 'gamma',
                }
            },
            'phi': {
                'r': {  # Recurrent state
                    'weight': 'phi',
                }
            },
            'kappa': {
                'r': {  # Recurrent state
                    'weight': 'kappa',
                }
            },
            'delta': {
                'r': {  # Recurrent state
                    'weight': 'delta',
                }
            }
        }

        # tuned summation: pooling in h, w dimensions
        #############################################
        q_array = np.ones(self.q_shape) / np.prod(self.q_shape)
        if 'Q' in self.lesions:
            q_array = np.zeros_like(q_array).astype(np.float32)
            print('Lesioning CRF excitation.')
        setattr(
            self, self.weight_dict['Q']['r']['weight'],
            tf.get_variable(name=self.weight_dict['Q']['r']['weight'],
                            dtype=self.dtype,
                            initializer=q_array.astype(np.float32),
                            trainable=False))

        # untuned suppression: reduction across feature axis
        ####################################################
        u_array = np.ones(self.u_shape) / np.prod(self.u_shape)
        if 'U' in self.lesions:
            u_array = np.zeros_like(u_array).astype(np.float32)
            print('Lesioning CRF inhibition.')
        setattr(
            self, self.weight_dict['U']['r']['weight'],
            tf.get_variable(name=self.weight_dict['U']['r']['weight'],
                            dtype=self.dtype,
                            initializer=u_array.astype(np.float32),
                            trainable=False))

        # weakly tuned summation: pooling in h, w dimensions
        #############################################
        if isinstance(self.p_shape[0], list) and 'P' not in self.lesions:
            # VGG-style filters
            for pidx, pext in enumerate(self.p_shape):
                if pidx == 0:
                    it_key = self.weight_dict['P']['r']['weight']
                else:
                    self.weight_dict['P']['r']['weight_%s' %
                                               pidx] = 'p_r_%s' % pidx
                    it_key = self.weight_dict['P']['r']['weight_%s' % pidx]
                setattr(
                    self, it_key,
                    tf.get_variable(
                        name=it_key,
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=pext, uniform=self.normal_initializer),
                        trainable=True))
        else:
            p_array = np.ones(self.p_shape)
            crf_start = self.SSN // 2 - py_utils.ifloor(self.SRF / 2.0)
            crf_end = self.SSF // 2 + py_utils.iceil(self.SSN / 2.0)
            p_array[crf_start:crf_end, crf_start:crf_end, :, :] = 0.0  # exclude CRF!
            p_array = p_array / p_array.sum()
            if 'P' in self.lesions:
                print('Lesioning near eCRF.')
                p_array = np.zeros_like(p_array).astype(np.float32)

            # Association field is fully learnable
            if self.association_field and 'P' not in self.lesions:
                setattr(
                    self, self.weight_dict['P']['r']['weight'],
                    tf.get_variable(
                        name=self.weight_dict['P']['r']['weight'],
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=self.p_shape,
                            uniform=self.normal_initializer),
                        trainable=True))
            else:
                setattr(
                    self, self.weight_dict['P']['r']['weight'],
                    tf.get_variable(name=self.weight_dict['P']['r']['weight'],
                                    dtype=self.dtype,
                                    initializer=p_array.astype(np.float32),
                                    trainable=False))
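
        # Receptive-field bookkeeping for the VGG-style branch above: a stack
        # of n kxk kernels has an effective spatial extent of n * (k - 1) + 1,
        # so several small learnable filters can cover the same surround as a
        # single large kernel with fewer parameters. For example:
        assert 2 * (3 - 1) + 1 == 5   # two 3x3 convs span a 5x5 support
        assert 7 * (3 - 1) + 1 == 15  # seven 3x3 convs span a 15x15 support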

        # Connectivity tensors -- Q/P/T
        if 'Q' in self.lesions:
            print('Lesioning CRF excitation connectivity.')
            setattr(
                self, self.weight_dict['Q']['r']['tuning'],
                tf.get_variable(name=self.weight_dict['Q']['r']['tuning'],
                                dtype=self.dtype,
                                trainable=False,
                                initializer=np.zeros(self.tuning_shape).astype(
                                    np.float32)))
        else:
            setattr(
                self, self.weight_dict['Q']['r']['tuning'],
                tf.get_variable(name=self.weight_dict['Q']['r']['tuning'],
                                dtype=self.dtype,
                                trainable=True,
                                initializer=initialization.xavier_initializer(
                                    shape=self.tuning_shape,
                                    uniform=self.normal_initializer,
                                    mask=None)))
        # Gate weights
        setattr(
            self, self.weight_dict['I']['r']['weight'],
            tf.get_variable(name=self.weight_dict['I']['r']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.i_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['I']['f']['weight'],
            tf.get_variable(name=self.weight_dict['I']['f']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.i_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['I']['r']['bias'],
            tf.get_variable(name=self.weight_dict['I']['r']['bias'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=tf.ones(self.bias_shape)))

        # Output
        setattr(
            self, self.weight_dict['O']['r']['weight'],
            tf.get_variable(name=self.weight_dict['O']['r']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.o_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(
            self, self.weight_dict['O']['f']['weight'],
            tf.get_variable(name=self.weight_dict['O']['f']['weight'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=initialization.xavier_initializer(
                                shape=self.o_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
        setattr(  # TODO: smart initialization of these
            self, self.weight_dict['O']['r']['bias'],
            tf.get_variable(name=self.weight_dict['O']['r']['bias'],
                            dtype=self.dtype,
                            trainable=True,
                            initializer=tf.ones(self.bias_shape)))

        # Degree of freedom weights (vectors)
        w_array = np.ones([1, 1, 1, self.k]).astype(np.float32)
        b_array = np.zeros([1, 1, 1, self.k]).astype(np.float32)

        # Divisive params
        self.alpha = tf.get_variable(name='alpha', initializer=w_array)
        self.beta = tf.get_variable(name='beta', initializer=w_array)

        # Subtractive params
        self.mu = tf.get_variable(name='mu', initializer=b_array)
        self.nu = tf.get_variable(name='nu', initializer=b_array)
        if self.zeta:
            self.zeta = tf.get_variable(name='zeta', initializer=w_array)
        else:
            self.zeta = tf.constant(1.)
        if self.gamma:
            self.gamma = tf.get_variable(name='gamma', initializer=w_array)
        else:
            self.gamma = tf.constant(1.)
        if self.delta:
            self.delta = tf.get_variable(name='delta', initializer=w_array)
        else:
            self.delta = tf.constant(1.)
        if self.xi:
            self.xi = tf.get_variable(name='xi', initializer=w_array)
        else:
            self.xi = tf.constant(1.)
        if self.multiplicative_excitation:
            self.kappa = tf.get_variable(name='kappa', initializer=w_array)
            self.omega = tf.get_variable(name='omega', initializer=w_array)
        else:
            self.kappa = tf.constant(1.)
            self.omega = tf.constant(1.)
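
        # kappa and omega are consumed by the recurrent update, which is not
        # shown in this snippet. In hGRU-style circuits they typically weight
        # additive vs. multiplicative excitation, roughly (a sketch):
        #   h2 = self.kappa * (c2 + g2) + self.omega * (c2 * g2)
        # so the constant-1 fallbacks above keep both terms when
        # multiplicative_excitation is disabled.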
Esempio n. 23
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices.
        (np.prod([h, w, k]) / 2) - k params in the surround filter
        """
        if self.constrain:
            constraint = lambda x: tf.clip_by_value(x, 0., np.inf)
        else:
            constraint = None
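        # When enabled, the constraint projects these parameters onto
        # [0, inf); tf.get_variable passes it to the optimizer, which
        # re-applies it after each update.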
        self.var_scope = '%s_hgru_weights' % self.layer_name
        with tf.variable_scope(self.var_scope):
            if self.symmetric_weights and self.symmetric_inits:
                h_init = self.symmetric_init(
                    initialization.xavier_initializer(
                        shape=self.h_shape, uniform=self.normal_initializer))
            else:
                h_init = initialization.xavier_initializer(
                    shape=self.h_shape, uniform=self.normal_initializer)
            self.horizontal_kernels = tf.get_variable(name='%s_horizontal' %
                                                      self.layer_name,
                                                      dtype=self.dtype,
                                                      initializer=h_init,
                                                      trainable=self.train)
            if self.symmetric_gate_weights and self.symmetric_inits:
                g_init = self.symmetric_init(
                    initialization.xavier_initializer(
                        shape=self.g_shape, uniform=self.normal_initializer))
                m_init = self.symmetric_init(
                    initialization.xavier_initializer(
                        shape=self.m_shape, uniform=self.normal_initializer))
            else:
                g_init = initialization.xavier_initializer(
                    shape=self.g_shape, uniform=self.normal_initializer)
                m_init = initialization.xavier_initializer(
                    shape=self.m_shape, uniform=self.normal_initializer)
            self.gain_kernels = tf.get_variable(name='%s_gain' %
                                                self.layer_name,
                                                dtype=self.dtype,
                                                trainable=self.train,
                                                initializer=g_init)
            self.mix_kernels = tf.get_variable(name='%s_mix' % self.layer_name,
                                               dtype=self.dtype,
                                               trainable=self.train,
                                               initializer=m_init)

            # Gain bias
            if self.gate_bias_init == 'chronos':
                bias_init = -tf.log(
                    tf.random_uniform(
                        self.bias_shape, minval=1, maxval=self.timesteps - 1))
            else:
                bias_init = -tf.ones(self.bias_shape)
            self.gain_bias = tf.get_variable(name='%s_gain_bias' %
                                             self.layer_name,
                                             dtype=self.dtype,
                                             trainable=self.train,
                                             initializer=bias_init)
            if self.gate_bias_init == 'chronos':
                bias_init = -bias_init
            else:
                bias_init = tf.ones(self.bias_shape)
            self.mix_bias = tf.get_variable(name='%s_mix_bias' %
                                            self.layer_name,
                                            dtype=self.dtype,
                                            trainable=self.train,
                                            initializer=bias_init)
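            # 'chronos' follows Tallec & Ollivier (2018): the gain bias is
            # drawn as -log(U(1, timesteps - 1)), biasing the gates toward
            # integration over timescales up to the unroll length, and the
            # mix bias is set to its negation, +log(U(1, timesteps - 1)).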

            # Divisive params
            if self.alpha and not self.lesion_alpha:
                self.alpha = tf.get_variable(
                    name='%s_alpha' % self.layer_name,
                    trainable=self.train,
                    constraint=constraint,
                    initializer=initialization.xavier_initializer(
                        shape=self.bias_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            elif self.lesion_alpha:
                self.alpha = tf.constant(0.)
            else:
                self.alpha = tf.constant(1.)

            if self.mu and not self.lesion_mu:
                self.mu = tf.get_variable(
                    name='%s_mu' % self.layer_name,
                    trainable=self.train,
                    constraint=constraint,
                    initializer=initialization.xavier_initializer(
                        shape=self.bias_shape,
                        uniform=self.normal_initializer,
                        mask=None))

            elif self.lesion_mu:
                self.mu = tf.constant(0.)
            else:
                self.mu = tf.constant(1.)

            if self.gamma:
                self.gamma = tf.get_variable(
                    name='%s_gamma' % self.layer_name,
                    trainable=self.train,
                    constraint=constraint,
                    initializer=initialization.xavier_initializer(
                        shape=self.bias_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            else:
                self.gamma = tf.constant(1.)

            if self.multiplicative_excitation:
                if self.lesion_kappa:
                    self.kappa = tf.constant(0.)
                else:
                    self.kappa = tf.get_variable(
                        name='%s_kappa' % self.layer_name,
                        trainable=self.train,
                        constraint=constraint,
                        initializer=initialization.xavier_initializer(
                            shape=self.bias_shape,
                            uniform=self.normal_initializer,
                            mask=None))
                if self.lesion_omega:
                    self.omega = tf.constant(0.)
                else:
                    self.omega = tf.get_variable(
                        name='%s_omega' % self.layer_name,
                        trainable=self.train,
                        constraint=constraint,
                        initializer=initialization.xavier_initializer(
                            shape=self.bias_shape,
                            uniform=self.normal_initializer,
                            mask=None))

            else:
                self.kappa = tf.constant(1.)
                self.omega = tf.constant(1.)

            if self.adaptation:
                self.eta = tf.get_variable(
                    trainable=self.train,
                    name='%s_eta' % self.layer_name,
                    shape=[self.timesteps],
                    initializer=tf.random_uniform_initializer)
            if self.lesion_omega:
                self.omega = tf.constant(0.)
            if self.lesion_kappa:
                self.kappa = tf.constant(0.)
            if self.reuse:
                # Make the batchnorm variables
                scopes = ['g1_bn', 'g2_bn', 'c1_bn', 'c2_bn']
                bn_vars = ['moving_mean', 'moving_variance', 'gamma']
                for s in scopes:
                    with tf.variable_scope(s) as scope:
                        for v in bn_vars:
                            tf.get_variable(
                                trainable=self.param_trainable[v],
                                name=v,
                                shape=[self.k],
                                collections=self.param_collections[v],
                                initializer=self.param_initializer[v])
                self.param_initializer = None
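Example 23 optionally symmetrizes its recurrent and gate kernels at init time (symmetric_weights/symmetric_inits). The symmetric_init helper is defined elsewhere in the class; one plausible definition (an assumption, shown only for orientation) averages a kernel with its channel transpose so connections i -> j and j -> i start tied:

import tensorflow as tf

def symmetric_init(w):
    # Hypothetical helper: symmetrize a [h, w, k, k] kernel across its
    # two channel axes.
    return 0.5 * (w + tf.transpose(w, perm=[0, 1, 3, 2]))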
Example 24
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices.
        (np.prod([h, w, k]) / 2) - k params in the surround filter
        """
        # Create FF vars
        setattr(
            self, 'resize_kernel',
            tf.get_variable(name='%s_resize_kernel' % self.layer_name,
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.pooling_kernel + [self.k, self.k],
                                uniform=self.normal_initializer),
                            trainable=True))
        setattr(
            self, 'resize_bias',
            tf.get_variable(name='%s_resize_bias' % self.layer_name,
                            dtype=self.dtype,
                            initializer=tf.ones([self.k]),
                            trainable=True))

        for idx, (ff_filters, ff_kernel) in enumerate(
                zip(self.intermediate_ff, self.intermediate_ks)):
            setattr(
                self, 'intermediate_kernel_%s' % idx,
                tf.get_variable(name='%s_ffdrive_kernel_%s' %
                                (self.layer_name, idx),
                                dtype=self.dtype,
                                initializer=initialization.xavier_initializer(
                                    shape=ff_kernel + [ff_filters, ff_filters],
                                    uniform=self.normal_initializer),
                                trainable=True))
            setattr(
                self, 'intermediate_bias_%s' % idx,
                tf.get_variable(name='%s_ffdrive_bias_%s' %
                                (self.layer_name, idx),
                                dtype=self.dtype,
                                initializer=tf.ones([ff_filters]),
                                trainable=True))

        # Create recurrent vars
        for idx, layer in enumerate(self.hgru_ids):
            with tf.variable_scope('%s_hgru_weights_%s' %
                                   (self.layer_name, layer)):
                setattr(
                    self, 'horizontal_kernels_%s' % layer,
                    tf.get_variable(
                        name='%s_horizontal' % self.layer_name,
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=self.h_ext[idx] + [self.k, self.k],
                            uniform=self.normal_initializer),
                        trainable=True))
                setattr(
                    self, 'gain_kernels_%s' % layer,
                    tf.get_variable(
                        name='%s_gain' % self.layer_name,
                        dtype=self.dtype,
                        trainable=True,
                        initializer=initialization.xavier_initializer(
                            shape=self.g_shape,
                            uniform=self.normal_initializer,
                            mask=None)))
                setattr(
                    self, 'mix_kernels_%s' % layer,
                    tf.get_variable(
                        name='%s_mix' % self.layer_name,
                        dtype=self.dtype,
                        trainable=True,
                        initializer=initialization.xavier_initializer(
                            shape=self.m_shape,
                            uniform=self.normal_initializer,
                            mask=None)))

                # Gain bias
                if self.gate_bias_init == 'chronos':
                    bias_init = -tf.log(
                        tf.random_uniform(self.bias_shape,
                                          minval=1,
                                          maxval=self.timesteps - 1))
                else:
                    bias_init = tf.ones(self.bias_shape)
                setattr(
                    self, 'gain_bias_%s' % layer,
                    tf.get_variable(name='%s_gain_bias' % self.layer_name,
                                    dtype=self.dtype,
                                    trainable=True,
                                    initializer=bias_init))
                if self.gate_bias_init == 'chronos':
                    bias_init = -bias_init
                else:
                    bias_init = tf.ones(self.bias_shape)
                setattr(
                    self, 'mix_bias_%s' % layer,
                    tf.get_variable(name='%s_mix_bias' % self.layer_name,
                                    dtype=self.dtype,
                                    trainable=True,
                                    initializer=bias_init))

                # Divisive params
                if self.alpha and not self.lesion_alpha:
                    setattr(
                        self, 'alpha_%s' % layer,
                        tf.get_variable(
                            name='%s_alpha' % self.layer_name,
                            initializer=initialization.xavier_initializer(
                                shape=self.bias_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
                elif self.lesion_alpha:
                    setattr(self, 'alpha_%s' % layer, tf.constant(0.))
                else:
                    setattr(self, 'alpha_%s' % layer, tf.constant(1.))

                if self.mu and not self.lesion_mu:
                    setattr(
                        self, 'mu_%s' % layer,
                        tf.get_variable(
                            name='%s_mu' % self.layer_name,
                            initializer=initialization.xavier_initializer(
                                shape=self.bias_shape,
                                uniform=self.normal_initializer,
                                mask=None)))

                elif self.lesion_mu:
                    setattr(self, 'mu_%s' % layer, tf.constant(0.))
                else:
                    setattr(self, 'mu_%s' % layer, tf.constant(1.))

                if self.gamma:
                    setattr(
                        self, 'gamma_%s' % layer,
                        tf.get_variable(
                            name='%s_gamma' % self.layer_name,
                            initializer=initialization.xavier_initializer(
                                shape=self.bias_shape,
                                uniform=self.normal_initializer,
                                mask=None)))
                else:
                    setattr(self, 'gamma_%s' % layer, tf.constant(1.))

                if self.multiplicative_excitation:
                    if self.lesion_kappa:
                        setattr(self, 'kappa_%s' % layer, tf.constant(0.))
                    else:
                        setattr(
                            self, 'kappa_%s' % layer,
                            tf.get_variable(
                                name='%s_kappa' % self.layer_name,
                                initializer=initialization.xavier_initializer(
                                    shape=self.bias_shape,
                                    uniform=self.normal_initializer,
                                    mask=None)))
                    if self.lesion_omega:
                        setattr(self, 'omega_%s' % layer, tf.constant(0.))
                    else:
                        setattr(
                            self, 'omega_%s' % layer,
                            tf.get_variable(
                                name='%s_omega' % self.layer_name,
                                initializer=initialization.xavier_initializer(
                                    shape=self.bias_shape,
                                    uniform=self.normal_initializer,
                                    mask=None)))
                else:
                    setattr(self, 'kappa_%s' % layer, tf.constant(1.))
                    setattr(self, 'omega_%s' % layer, tf.constant(1.))
                if self.adapation:  # (sic) matches the attribute name used by this class
                    setattr(
                        self, 'eta_%s' % layer,
                        tf.get_variable(
                            name='%s_eta' % self.layer_name,
                            initializer=tf.random_uniform([self.timesteps],
                                                          dtype=tf.float32)))
                if self.lesion_omega:
                    setattr(self, 'omega_%s' % layer, tf.constant(0.))
                if self.lesion_kappa:
                    setattr(self, 'kappa_%s' % layer, tf.constant(0.))
                if self.reuse:
                    # Make the batchnorm variables
                    scopes = ['g1_bn', 'g2_bn', 'c1_bn', 'c2_bn']
                    bn_vars = ['moving_mean', 'moving_variance', 'gamma']
                    for s in scopes:
                        with tf.variable_scope(s):
                            for v in bn_vars:
                                tf.get_variable(
                                    trainable=self.param_trainable[v],
                                    name=v,
                                    shape=[self.k],
                                    collections=self.param_collections[v],
                                    initializer=self.param_initializer[v])
                    self.param_initializer = None
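Since Example 24 registers one weight set per hGRU layer through setattr, its forward pass has to recover the tensors by the same string keys. A self-contained sketch of that lookup pattern (hypothetical names; the real consumer is the class's recurrent step, which is not shown):

class _Demo(object):
    pass

demo = _Demo()
for layer in ['h1', 'h2']:  # stand-ins for self.hgru_ids
    setattr(demo, 'horizontal_kernels_%s' % layer, layer + '_kernel')

# The recurrent step retrieves each layer's tensors the same way:
for layer in ['h1', 'h2']:
    kernel = getattr(demo, 'horizontal_kernels_%s' % layer)
    print(kernel)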
Example 25
    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices.
        9 * k + (2 * k^2) params in the greek letters/gates.
        (np.prod([h, w, k]) / 2) - k params in the surround filter
        """
        self.weight_dict = {  # Weights lower/activity upper
            'P': {
                'r': {
                    'weight': 'p_r',
                    'activity': 'P_r',
                    'tuning': 'p_t',
                    # 'bias': 'i_b'
                }
            },
            'I': {
                'r': {  # Recurrent state
                    'weight': 'i_r',
                    'bias': 'i_b',
                    'activity': 'I_r'
                },
                # 'f': {  # Recurrent state
                #     'weight': 'i_f',
                #     'activity': 'I_f'
                # },
            },
            'O': {
                'r': {  # Recurrent state
                    'weight': 'o_r',
                    'bias': 'o_b',
                    'activity': 'O_r'
                },
                # 'f': {  # Recurrent state
                #     'weight': 'o_f',
                #     'activity': 'O_f'
                # },
            },
            'xi': {
                'r': {  # Recurrent state
                    'weight': 'xi',
                }
            },
            # 'alpha': {
            #     'r': {  # Recurrent state
            #         'weight': 'alpha',
            #     }
            # },
            'beta': {
                'r': {  # Recurrent state
                    'weight': 'beta',
                }
            },
            # 'mu': {
            #     'r': {  # Recurrent state
            #         'weight': 'mu',
            #     }
            # },
            'nu': {
                'r': {  # Recurrent state
                    'weight': 'nu',
                }
            },
            'zeta': {
                'r': {  # Recurrent state
                    'weight': 'zeta',
                }
            },
            'gamma': {
                'r': {  # Recurrent state
                    'weight': 'gamma',
                }
            },
            'phi': {
                'r': {  # Recurrent state
                    'weight': 'phi',
                }
            },
            'kappa': {
                'r': {  # Recurrent state
                    'weight': 'kappa',
                }
            },
            'rho': {
                'r': {  # Recurrent state
                    'weight': 'rho',
                }
            },
        }
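        # The dict's lower-case string values double as attribute names:
        # e.g. setattr(self, self.weight_dict['I']['r']['weight'], ...)
        # below creates self.i_r, while the upper-case 'activity' entries
        # name per-step activities (the "Weights lower/activity upper"
        # convention noted above).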

        # weakly tuned summation: pooling in h, w dimensions
        #############################################
        with tf.variable_scope('contextual_circuit'):
            if isinstance(self.p_shape[0], list) and 'P' not in self.lesions:
                # VGG-style filters
                for pidx, pext in enumerate(self.p_shape):
                    if pidx == 0:
                        it_key = self.weight_dict['P']['r']['weight']
                    else:
                        self.weight_dict['P']['r']['weight_%s' %
                                                   pidx] = 'p_r_%s' % pidx
                        it_key = self.weight_dict['P']['r']['weight_%s' % pidx]
                    setattr(
                        self, it_key,
                        tf.get_variable(
                            name=it_key,
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=pext, uniform=self.normal_initializer),
                            trainable=True))
            else:
                p_array = np.ones(self.p_shape)
                # Zero out the center so the kernel pools only from the
                # extra-classical surround (exclude the CRF!).
                crf_start = self.SSN // 2 - py_utils.ifloor(self.SRF / 2.0)
                crf_stop = self.SSF // 2 + py_utils.iceil(self.SSN / 2.0)
                p_array[crf_start:crf_stop, crf_start:crf_stop, :, :] = 0.0
                p_array = p_array / p_array.sum()
                if 'P' in self.lesions:
                    print('Lesioning near eCRF.')
                    p_array = np.zeros_like(p_array).astype(np.float32)

                # Association field is fully learnable
                if self.association_field and 'P' not in self.lesions:
                    setattr(
                        self,
                        self.weight_dict['P']['r']['weight'],
                        tf.get_variable(
                            name=self.weight_dict['P']['r']['weight'],
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=self.p_shape,
                                uniform=self.normal_initializer),
                            trainable=True))
                else:
                    setattr(
                        self, self.weight_dict['P']['r']['weight'],
                        tf.get_variable(
                            name=self.weight_dict['P']['r']['weight'],
                            dtype=self.dtype,
                            initializer=p_array.astype(np.float32),
                            trainable=False))
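            # With association_field the surround kernel is fully
            # learnable; otherwise the fixed, normalized surround mask
            # built above is frozen (trainable=False).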

            # Gate weights
            setattr(
                self, self.weight_dict['I']['r']['weight'],
                tf.get_variable(name=self.weight_dict['I']['r']['weight'],
                                dtype=self.dtype,
                                trainable=True,
                                initializer=initialization.xavier_initializer(
                                    shape=self.i_shape,
                                    uniform=self.normal_initializer,
                                    mask=None)))
            if self.gate_bias_init == 'chronos':
                bias_init = -tf.log(
                    tf.random_uniform(
                        self.bias_shape, minval=1, maxval=self.timesteps - 1))
            else:
                bias_init = tf.ones(self.bias_shape)
            setattr(
                self, self.weight_dict['I']['r']['bias'],
                tf.get_variable(name=self.weight_dict['I']['r']['bias'],
                                dtype=self.dtype,
                                trainable=True,
                                initializer=bias_init))

            # Output
            setattr(
                self, self.weight_dict['O']['r']['weight'],
                tf.get_variable(name=self.weight_dict['O']['r']['weight'],
                                dtype=self.dtype,
                                trainable=True,
                                initializer=initialization.xavier_initializer(
                                    shape=self.o_shape,
                                    uniform=self.normal_initializer,
                                    mask=None)))
            if self.gate_bias_init == 'chronos':
                bias_init = -bias_init
            else:
                bias_init = tf.ones(self.bias_shape)
            setattr(  # TODO: smart initialization of these
                self, self.weight_dict['O']['r']['bias'],
                tf.get_variable(name=self.weight_dict['O']['r']['bias'],
                                dtype=self.dtype,
                                trainable=True,
                                initializer=bias_init))

            # Degree of freedom weights (vectors)
            w_shape = [1, 1, 1, self.k]
            b_shape = [1, 1, 1, self.k]

            # Divisive params
            if self.beta and not self.lesion_beta:
                self.beta = tf.get_variable(
                    name='beta',
                    initializer=initialization.xavier_initializer(
                        shape=w_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            elif self.lesion_beta:
                self.beta = tf.constant(0.)
            else:
                self.beta = tf.constant(1.)

            if self.nu and not self.lesion_nu:
                self.nu = tf.get_variable(
                    name='nu',
                    initializer=initialization.xavier_initializer(
                        shape=b_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            elif self.lesion_nu:
                self.nu = tf.constant(0.)
            else:
                self.nu = tf.constant(1.)
            if self.zeta:
                self.zeta = tf.get_variable(
                    name='zeta',
                    initializer=initialization.xavier_initializer(
                        shape=w_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            else:
                self.zeta = tf.constant(1.)
            if self.gamma:
                self.gamma = tf.get_variable(
                    name='gamma',
                    initializer=initialization.xavier_initializer(
                        shape=w_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            else:
                self.gamma = tf.constant(1.)
            # # TODO
            # self.ebias = tf.get_variable(
            #     name='ebias',
            #     initializer=initialization.xavier_initializer(
            #         shape=b_shape,
            #         uniform=self.normal_initializer,
            #         mask=None))

            if self.xi:
                self.xi = tf.get_variable(
                    name='xi',
                    initializer=initialization.xavier_initializer(
                        shape=w_shape,
                        uniform=self.normal_initializer,
                        mask=None))
            else:
                self.xi = tf.constant(1.)
            if self.multiplicative_excitation:
                if self.lesion_kappa:
                    self.kappa = tf.constant(0.)
                else:
                    self.kappa = tf.get_variable(
                        name='kappa',
                        initializer=initialization.xavier_initializer(
                            shape=w_shape,
                            uniform=self.normal_initializer,
                            mask=None))

                if self.lesion_omega:
                    self.omega = tf.constant(0.)
                else:
                    self.omega = tf.get_variable(
                        name='omega',
                        initializer=initialization.xavier_initializer(
                            shape=w_shape,
                            uniform=self.normal_initializer,
                            mask=None))
            else:
                self.kappa = tf.constant(1.)
                self.omega = tf.constant(1.)
            if self.adapation:  # (sic) matches the attribute name used by this class
                self.rho = tf.get_variable(name='rho',
                                           initializer=tf.ones(
                                               self.timesteps,
                                               dtype=tf.float32))
            if self.lesion_omega:
                self.omega = tf.constant(0.)
            if self.lesion_kappa:
                self.kappa = tf.constant(0.)
            self.lateral_bias = tf.get_variable(
                name='lateral_bias',
                initializer=initialization.xavier_initializer(
                    shape=b_shape, uniform=self.normal_initializer, mask=None))
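The adapation branch above allocates rho, one value per timestep, but its consumer sits outside this method. A hedged sketch of the usual role of such a vector, a per-timestep gain on the recurrent drive (hypothetical; the actual use lives in the class's step function):

import numpy as np

timesteps = 8
rho = np.ones(timesteps, dtype=np.float32)  # mirrors tf.ones(self.timesteps)
h2 = np.zeros((1, 8, 8, 4), dtype=np.float32)
drive = np.random.rand(1, 8, 8, 4).astype(np.float32)

for t in range(timesteps):
    # Hypothetical adaptation: scale this step's drive by rho[t].
    h2 = h2 + rho[t] * drive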