# Example 1
class CFPLF_OutstarHebbian(CFPLearningFn):
    """
    CFPLearningFunction applying the specified single_cf_fn (Hebbian by
    default) to each CF, with normalization done in an outstar manner.

    Presumably does not need a separate output_fn for normalization.

    NOT YET TESTED.
    """
    single_cf_fn = param.ClassSelector(
        LearningFn,
        default=Hebbian(),
        doc="LearningFn that will be applied to each CF individually.")

    outstar_wsum = None

    def __call__(self, iterator, input_activity, output_activity,
                 learning_rate, **params):
        single_connection_learning_rate = self.constant_sum_connection_rate(
            iterator.proj_n_units, learning_rate)
        # avoid evaluating these references each time in the loop
        single_cf_fn = self.single_cf_fn
        outstar_wsum = zeros(input_activity.shape)
        for cf, i in iterator():
            single_cf_fn(cf.get_input_matrix(input_activity),
                         output_activity.flat[i], cf.weights,
                         single_connection_learning_rate)
            # Outstar normalization: accumulate each CF's weights.
            # Note: this sums by local weight coordinates, so every CF lands
            # in the top-left corner of outstar_wsum, and the accumulated sum
            # is never applied back to the weights (see NOT YET TESTED above).
            wrows, wcols = cf.weights.shape
            outstar_wsum[:wrows, :wcols] += cf.weights

            # CEBHACKALERT: see ConnectionField.__init__()
            cf.weights *= cf.mask
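
# A minimal, self-contained sketch of the outstar idea above, in plain
# NumPy (the function name and the (weights, offset) pairs are illustrative
# assumptions, not part of the class API): accumulate each input unit's
# total outgoing weight across all CFs, then divide so that the weights
# leaving every input unit sum to one.
import numpy as np

def outstar_normalize(weights_per_cf, input_shape):
    """Normalize each input unit's outgoing weights to sum to 1."""
    wsum = np.zeros(input_shape)
    for w, (r0, c0) in weights_per_cf:         # each CF's weights and offset
        rows, cols = w.shape
        wsum[r0:r0 + rows, c0:c0 + cols] += w  # total outgoing weight per unit
    for w, (r0, c0) in weights_per_cf:
        rows, cols = w.shape
        denom = wsum[r0:r0 + rows, c0:c0 + cols]
        w /= np.where(denom == 0.0, 1.0, denom)  # guard against zero sums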
# Example 2
class CFSPLF_Plugin(param.Parameterized):
    """CFSPLearningFunction applying the specified single_cf_fn to each Sparse CF."""

    single_cf_fn = param.ClassSelector(
        LearningFn,
        default=Hebbian(),
        doc="LearningFn that will be applied to each CF individually.")


    def constant_sum_connection_rate(self,n_units,learning_rate):
        """
        Return the learning rate for a single connection assuming that
        the total rate is to be divided evenly among all the units in
        the connection field.
        """
        return float(learning_rate)/n_units


    def __call__(self, projection, **params):
        """Apply the specified single_cf_fn to every sparse CF."""
        single_connection_learning_rate = self.constant_sum_connection_rate(
            projection.n_units, projection.learning_rate)
        # avoid evaluating these references each time in the loop
        single_cf_fn = self.single_cf_fn

        for cf in projection.flatcfs:
            temp_weights = cf.weights
            single_cf_fn(cf.get_input_matrix(projection.src.activity),
                         projection.dest.activity.flat[cf.oned_idx], temp_weights,
                         single_connection_learning_rate)
            temp_weights *= cf.mask
            cf.weights = temp_weights
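
# Worked example of constant_sum_connection_rate above (illustrative
# numbers): a projection-level learning rate of 0.1 spread over a 5x5
# connection field (25 units) gives each connection a rate of
# 0.1/25 = 0.004, so the total update summed across the CF stays the
# same regardless of CF size.
#
#     CFSPLF_Plugin().constant_sum_connection_rate(25, 0.1)  # -> 0.004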
# Example 3
class CFPLF_Trace(CFPLearningFn):
    """
    LearningFn that incorporates a trace of recent activity,
    not just the current activity.

    Based on P. Foldiak (1991), "Learning Invariance from
    Transformation Sequences", Neural Computation 3:194-200.  Also see
    Sutton and Barto (1981) and Wallis and Rolls (1997).

    Incorporates a decay term to keep the weight vector bounded, and
    so it does not normally require any output_fn normalization for
    stability.

    NOT YET TESTED.
    """

    trace_strength = param.Number(
        default=0.5,
        bounds=(0.0, 1.0),
        doc="How much learning is dominated by the activity trace, "
            "relative to the current activity value.")

    single_cf_fn = param.ClassSelector(
        LearningFn,
        default=Hebbian(),
        doc="LearningFn that will be applied to each CF individually.")

    def __call__(self, iterator, input_activity, output_activity,
                 learning_rate, **params):
        single_connection_learning_rate = self.constant_sum_connection_rate(
            iterator.proj_n_units, learning_rate)
        # Initialize traces to zero if they don't already exist
        if not hasattr(self, 'traces'):
            self.traces = zeros(output_activity.shape, activity_type)
        for cf, i in iterator():
            unit_activity = output_activity.flat[i]
            new_trace = (self.trace_strength * unit_activity) + (
                (1 - self.trace_strength) * self.traces.flat[i])
            self.traces.flat[i] = new_trace
            cf.weights += single_connection_learning_rate * new_trace * \
                              (cf.get_input_matrix(input_activity) - cf.weights)

            # CEBHACKALERT: see ConnectionField.__init__()
            cf.weights *= cf.mask
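
# Standalone sketch of the trace rule above in plain NumPy (illustrative
# values; `update_trace` is a hypothetical helper, not part of the class):
# with trace_strength a in [0, 1],
#     trace_t = a * y_t + (1 - a) * trace_(t-1),
# so learning follows a smoothed history of activity rather than the
# instantaneous value.
import numpy as np

def update_trace(trace, activity, trace_strength=0.5):
    """Exponentially weighted trace of recent activity (Foldiak 1991)."""
    return trace_strength * activity + (1.0 - trace_strength) * trace

trace = 0.0
for y in [1.0, 0.0, 0.0]:          # a unit active once, then silent
    trace = update_trace(trace, y)
    # trace decays smoothly: 0.5, 0.25, 0.125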
# Example 4
class CFPLF_PluginScaled(CFPLearningFn):
    """
    CFPLearningFunction applying the specified single_cf_fn to each CF.
    Scales the single-connection learning rate by a scaling factor
    that is different for each individual unit. Thus each individual
    connection field uses a different learning rate.
    """

    single_cf_fn = param.ClassSelector(
        LearningFn,
        default=Hebbian(),
        doc="Accepts a LearningFn that will be applied to each CF individually."
    )

    learning_rate_scaling_factor = param.Parameter(
        default=None,
        doc="Matrix of scaling factors for adjusting each CF's "
            "learning rate individually.")

    def __call__(self, iterator, input_activity, output_activity,
                 learning_rate, **params):
        """Apply the specified single_cf_fn to every CF."""

        if self.learning_rate_scaling_factor is None:
            self.learning_rate_scaling_factor = ones(output_activity.shape)

        single_cf_fn = self.single_cf_fn
        single_connection_learning_rate = self.constant_sum_connection_rate(
            iterator.proj_n_units, learning_rate)

        for cf, i in iterator():
            sc_learning_rate = (self.learning_rate_scaling_factor.flat[i] *
                                single_connection_learning_rate)
            single_cf_fn(cf.get_input_matrix(input_activity),
                         output_activity.flat[i], cf.weights, sc_learning_rate)
            # CEBHACKALERT: see ConnectionField.__init__() re. mask & output fn
            cf.weights *= cf.mask

    def update_scaling_factor(self, new_scaling_factor):
        """Update the single-connection learning rate scaling factor."""
        self.learning_rate_scaling_factor = new_scaling_factor
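
# Hypothetical usage sketch for the per-unit scaling above (the shape
# and values are assumptions, not part of this module): units with a
# factor of 2.0 learn twice as fast as the base per-connection rate,
# and a new matrix can be swapped in at any time.
#
#     cfplf = CFPLF_PluginScaled()
#     cfplf.update_scaling_factor(2.0 * ones(dest_sheet_shape))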
# Example 5
class CFPLF_Hebbian_cython(CFPLF_Plugin):
    """Same as CFPLF_Plugin(single_cf_fn=Hebbian()); just for non-optimized fallback."""
    single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),readonly=True)
# Example 6
class HomeoSynaptic(CFPLearningFn):
    """
    Learning function using homeostatic synaptic scaling from
    Sullivan & de Sa, "Homeostatic Synaptic Scaling in Self-Organizing Maps",
    Neural Networks (2006), 19(6-7):734-43.

    Does not necessarily require output_fn normalization for stability.
    """
    single_cf_fn = param.ClassSelector(
        LearningFn,
        default=Hebbian(),
        doc="LearningFn that will be applied to each CF individually")

    beta_n = param.Number(default=0.01,
                          bounds=(0, None),
                          doc="Homeostatic learning rate.")

    beta_c = param.Number(
        default=0.005,
        bounds=(0, None),
        doc="Time window over which the neuron's firing rate is averaged.")

    activity_target = param.Number(default=0.1,
                                   bounds=(0, None),
                                   doc="Target average activity.")

    #debug = param.Boolean(default=False,doc="Print average activity values")
    #beta_n = param.Number(default=0.00033,bounds=(0,None),doc="Homeostatic learning rate") #Too small?
    #beta_c = param.Number(default=0.000033,bounds=(0,None),doc="Time window over which the neuron's firing rate is averaged")

    def __init__(self, **params):
        super(HomeoSynaptic, self).__init__(**params)
        self.temp_hist = []
        self.ave_hist = []

    def __call__(self, iterator, input_activity, output_activity,
                 learning_rate, **params):
        """
        Update the value of the given weights matrix based on the
        input_activity matrix (of the same size as the weights matrix)
        and the response of this unit (the unit_activity), governed by
        a per-connection learning rate.
        """
        if not hasattr(self, 'averages'):
            self.averages = ones(output_activity.shape, Float) * 0.1

            # normalize initial weights to 1.0
            for cf, i in iterator():
                current_norm_value = 1.0 * Numeric.sum(abs(cf.weights.ravel()))
                if current_norm_value != 0:
                    factor = (1.0 / current_norm_value)
                    cf.weights *= factor

        # compute recent average of output activity
        self.averages = self.beta_c * output_activity + (
            1.0 - self.beta_c) * self.averages
        activity_norm = 1.0 + self.beta_n * \
            ((self.averages - self.activity_target) / self.activity_target)

        single_connection_learning_rate = self.constant_sum_connection_rate(
            iterator.proj_n_units, learning_rate)

        # avoid evaluating these references each time in the loop
        single_cf_fn = self.single_cf_fn
        for cf, i in iterator():
            single_cf_fn(cf.get_input_matrix(input_activity),
                         output_activity.flat[i], cf.weights,
                         single_connection_learning_rate)

            # homeostatic normalization
            cf.weights /= activity_norm.flat[i]

            # CEBHACKALERT: see ConnectionField.__init__()
            cf.weights *= cf.mask

        # For analysis only; can be removed (in which case also remove
        # the initializations in __init__ above).
        # CEBALERT: I changed [0][7] to [0]!
        self.ave_hist.append(self.averages.flat[0])
        self.temp_hist.append(
            Numeric.sum(abs(iterator.flatcfs[0].weights.ravel())))
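
# Standalone sketch of the homeostatic scaling above, in plain NumPy
# (illustrative values; not part of the class): the running average
# tracks activity with rate beta_c, and the normalizer rises above 1
# where a unit fires above target, shrinking that unit's weights.
import numpy as np

beta_c, beta_n, target = 0.005, 0.01, 0.1
averages = np.full(3, 0.1)              # start at the target activity
activity = np.array([0.3, 0.1, 0.0])    # one unit too active, one silent

averages = beta_c * activity + (1.0 - beta_c) * averages
activity_norm = 1.0 + beta_n * (averages - target) / target
# activity_norm > 1 for the over-active unit, < 1 for the silent one,
# so dividing the weights by it nudges firing back toward the target.
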
class CFPLF_Trace_opt(CFPLearningFn):
    """
    Optimized version of CFPLF_Trace; see projfn.py for more info
    """

    trace_strength = param.Number(
        default=0.5,
        bounds=(0.0, 1.0),
        doc="How much learning is dominated by the activity trace, "
            "relative to the current activity value.")

    single_cf_fn = param.ClassSelector(
        LearningFn,
        default=Hebbian(),
        readonly=True,
        doc="LearningFn that will be applied to each CF individually.")

    def __call__(self, iterator, input_activity, output_activity,
                 learning_rate, **params):
        single_connection_learning_rate = self.constant_sum_connection_rate(
            iterator.proj_n_units, learning_rate)
        if single_connection_learning_rate == 0:
            return

        irows, icols = input_activity.shape
        cfs = iterator.flatcfs  # pyflakes:ignore (passed to weave C code)
        num_cfs = len(cfs)  # pyflakes:ignore (passed to weave C code)

        # Initialize traces to zero if they don't already exist
        if not hasattr(self, 'traces'):
            self.traces = zeros(output_activity.shape, activity_type)

        self.traces = (self.trace_strength * output_activity) + (
            (1 - self.trace_strength) * self.traces)
        traces = self.traces  # pyflakes:ignore (passed to weave C code)
        cf_type = iterator.cf_type  # pyflakes:ignore (passed to weave C code)
        code = c_header + """
            DECLARE_SLOT_OFFSET(weights,cf_type);
            DECLARE_SLOT_OFFSET(input_sheet_slice,cf_type);
            DECLARE_SLOT_OFFSET(mask,cf_type);
            DECLARE_SLOT_OFFSET(_norm_total,cf_type);
            DECLARE_SLOT_OFFSET(_has_norm_total,cf_type);

            %(cfs_loop_pragma)s
            for (int r=0; r<num_cfs; ++r) {
                double load = traces[r];
                if (load != 0) {
                    load *= single_connection_learning_rate;
                    PyObject *cf = PyList_GetItem(cfs,r);

                    LOOKUP_FROM_SLOT_OFFSET(float,weights,cf);
                    LOOKUP_FROM_SLOT_OFFSET(int,input_sheet_slice,cf);
                    LOOKUP_FROM_SLOT_OFFSET(float,mask,cf);

                    UNPACK_FOUR_TUPLE(int,rr1,rr2,cc1,cc2,input_sheet_slice);

                    double total = 0.0;

                    // modify non-masked weights
                    npfloat *inpj = input_activity+icols*rr1+cc1;
                    for (int i=rr1; i<rr2; ++i) {
                        npfloat *inpi = inpj;
                        for (int j=cc1; j<cc2; ++j) {
                            // The mask is floating point, so we have to
                            // use a robust comparison instead of testing
                            // against exactly 0.0.
                            if (*(mask++) >= MASK_THRESHOLD) {
                                *weights += load * *inpi;
                                total += fabs(*weights);
                            }
                            ++weights;
                            ++inpi;
                        }
                        inpj += icols;
                    }
                    // store the sum of the cf's weights
                    LOOKUP_FROM_SLOT_OFFSET(double,_norm_total,cf);
                    _norm_total[0]=total;
                    LOOKUP_FROM_SLOT_OFFSET(int,_has_norm_total,cf);
                    _has_norm_total[0]=1;
                }
            }
        """ % c_decorators

        inline(code,
               ['input_activity', 'traces', 'num_cfs', 'icols', 'cfs',
                'single_connection_learning_rate', 'cf_type'],
               local_dict=locals(),
               headers=['<structmember.h>'])

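# Python-equivalent sketch of the weave kernel above (illustrative only;
# the real code reads CF slots directly through C struct offsets, and
# MASK_THRESHOLD is the constant defined in the shared C header): for
# each CF whose trace is nonzero, the masked weights are nudged by the
# input patch's activity and the L1 norm of the weights is cached.
#
#     for r, cf in enumerate(cfs):
#         load = traces.flat[r] * single_connection_learning_rate
#         if load != 0:
#             patch = cf.get_input_matrix(input_activity)
#             active = cf.mask >= MASK_THRESHOLD
#             cf.weights[active] += load * patch[active]
#             cf.norm_total = abs(cf.weights[active]).sum()
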
class CFPLF_Scaled_opt(CFPLF_PluginScaled):
    """
    CF-aware Scaled Hebbian learning rule.

    Implemented in C for speed.  Should be equivalent to
    CFPLF_PluginScaled(single_cf_fn=Hebbian), except faster.

    As a side effect, sets the norm_total attribute on any cf whose
    weights are updated during learning, to speed up later operations
    that might depend on it.
    """
    single_cf_fn = param.ClassSelector(LearningFn,
                                       default=Hebbian(),
                                       readonly=True)

    def __call__(self, iterator, input_activity, output_activity,
                 learning_rate, **params):

        if self.learning_rate_scaling_factor is None:
            self.learning_rate_scaling_factor = ones(
                output_activity.shape) * 1.0
        learning_rate_scaling_factor = self.learning_rate_scaling_factor  # pyflakes:ignore (passed to weave C code)

        single_connection_learning_rate = self.constant_sum_connection_rate(
            iterator.proj_n_units, learning_rate)
        if single_connection_learning_rate == 0:
            return

        cfs = iterator.flatcfs
        num_cfs = len(cfs)  # pyflakes:ignore (passed to weave C code)
        irows, icols = input_activity.shape
        cf_type = iterator.cf_type  # pyflakes:ignore (passed to weave C code)

        sheet_mask = iterator.get_sheet_mask()  # pyflakes:ignore (passed to weave C code)

        code = c_header + """
            DECLARE_SLOT_OFFSET(weights,cf_type);
            DECLARE_SLOT_OFFSET(input_sheet_slice,cf_type);
            DECLARE_SLOT_OFFSET(mask,cf_type);
            DECLARE_SLOT_OFFSET(_norm_total,cf_type);
            DECLARE_SLOT_OFFSET(_has_norm_total,cf_type);

            %(cfs_loop_pragma)s
            for (int r=0; r<num_cfs; ++r) {
                double load = output_activity[r];
                double a = learning_rate_scaling_factor[r];
                load = load * a;
                if (load != 0 && sheet_mask[r] != 0) {
                    load *= single_connection_learning_rate;

                    PyObject *cf = PyList_GetItem(cfs,r);

                    LOOKUP_FROM_SLOT_OFFSET(float,weights,cf);
                    LOOKUP_FROM_SLOT_OFFSET(int,input_sheet_slice,cf);
                    LOOKUP_FROM_SLOT_OFFSET(float,mask,cf);

                    UNPACK_FOUR_TUPLE(int,rr1,rr2,cc1,cc2,input_sheet_slice);

                    double total = 0.0;

                    // modify non-masked weights
                    npfloat *inpj = input_activity+icols*rr1+cc1;
                    for (int i=rr1; i<rr2; ++i) {
                        npfloat *inpi = inpj;
                        for (int j=cc1; j<cc2; ++j) {
                            // The mask is floating point, so we have to
                            // use a robust comparison instead of testing
                            // against exactly 0.0.
                            if (*(mask++) >= MASK_THRESHOLD) {
                                *weights += load * *inpi;
                                total += fabs(*weights);
                            }
                            ++weights;
                            ++inpi;
                        }
                        inpj += icols;
                    }
                    // store the sum of the cf's weights
                    LOOKUP_FROM_SLOT_OFFSET(double,_norm_total,cf);
                    _norm_total[0]=total;
                    LOOKUP_FROM_SLOT_OFFSET(int,_has_norm_total,cf);
                    _has_norm_total[0]=1;
                }
            }
        """ % c_decorators

        inline(code,
               ['input_activity', 'learning_rate_scaling_factor',
                'output_activity', 'sheet_mask', 'num_cfs', 'icols', 'cfs',
                'single_connection_learning_rate', 'cf_type'],
               local_dict=locals(),
               headers=['<structmember.h>'])

class CFPLF_Hebbian_opt(CFPLearningFn):
    """
    CF-aware Hebbian learning rule.

    Implemented in C for speed.  Should be equivalent to
    CFPLF_Plugin(single_cf_fn=Hebbian), except faster.

    As a side effect, sets the norm_total attribute on any cf whose
    weights are updated during learning, to speed up later operations
    that might depend on it.

    May return without modifying anything if the learning rate turns
    out to be zero.
    """
    single_cf_fn = param.ClassSelector(LearningFn,
                                       default=Hebbian(),
                                       readonly=True)

    def __call__(self, iterator, input_activity, output_activity,
                 learning_rate, **params):
        single_connection_learning_rate = self.constant_sum_connection_rate(
            iterator.proj_n_units, learning_rate)
        if single_connection_learning_rate == 0:
            return

        cfs = iterator.flatcfs
        num_cfs = len(cfs)  # pyflakes:ignore (passed to weave C code)
        irows, icols = input_activity.shape
        cf_type = iterator.cf_type  # pyflakes:ignore (passed to weave C code)

        # CEBALERT: this function *always* skips inactive units,
        # because it uses the output_activity directly rather than
        # going through the iterator. That's ok since we know this
        # function can always skip inactive units. But the unoptimized
        # equivalent should be made to do the same, because right now
        # it respects the iterator.  (Just a case of setting the
        # iterator's active_units_mask to be True before calling the
        # iterator in the unoptimized version.)

        sheet_mask = iterator.get_sheet_mask()  # pyflakes:ignore (passed to weave C code)

        code = c_header + """
            DECLARE_SLOT_OFFSET(weights,cf_type);
            DECLARE_SLOT_OFFSET(input_sheet_slice,cf_type);
            DECLARE_SLOT_OFFSET(mask,cf_type);
            DECLARE_SLOT_OFFSET(_norm_total,cf_type);
            DECLARE_SLOT_OFFSET(_has_norm_total,cf_type);

            %(cfs_loop_pragma)s
            for (int r=0; r<num_cfs; ++r) {
                double load = output_activity[r];
                if (load != 0 && sheet_mask[r] != 0) {
                    load *= single_connection_learning_rate;

                    PyObject *cf = PyList_GetItem(cfs,r);

                    LOOKUP_FROM_SLOT_OFFSET(float,weights,cf);
                    LOOKUP_FROM_SLOT_OFFSET(int,input_sheet_slice,cf);
                    LOOKUP_FROM_SLOT_OFFSET(float,mask,cf);

                    UNPACK_FOUR_TUPLE(int,rr1,rr2,cc1,cc2,input_sheet_slice);

                    double total = 0.0;

                    // modify non-masked weights
                    npfloat *inpj = input_activity+icols*rr1+cc1;
                    for (int i=rr1; i<rr2; ++i) {
                        npfloat *inpi = inpj;
                        for (int j=cc1; j<cc2; ++j) {
                            // The mask is floating point, so we have to
                            // use a robust comparison instead of testing
                            // against exactly 0.0.
                            if (*(mask++) >= MASK_THRESHOLD) {
                                *weights += load * *inpi;
                                total += fabs(*weights);
                            }
                            ++weights;
                            ++inpi;
                        }
                        inpj += icols;
                    }
                    // store the sum of the cf's weights
                    LOOKUP_FROM_SLOT_OFFSET(double,_norm_total,cf);
                    _norm_total[0]=total;
                    LOOKUP_FROM_SLOT_OFFSET(int,_has_norm_total,cf);
                    _has_norm_total[0]=1;
                }
            }
        """ % c_decorators

        inline(code,
               ['input_activity', 'output_activity', 'sheet_mask', 'num_cfs',
                'icols', 'cfs', 'single_connection_learning_rate', 'cf_type'],
               local_dict=locals(),
               headers=['<structmember.h>'])
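
# Sketch of how the _norm_total side effect could be consumed later
# (a hypothetical consumer; the actual normalization output functions
# live elsewhere in Topographica): caching the sum of |weights| during
# learning lets a subsequent divisive normalization skip a full pass
# over each CF's weights.
#
#     for cf in projection.flatcfs:
#         if cf._has_norm_total:
#             cf.weights /= cf._norm_total
#             cf._has_norm_total = 0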