Code example #1
    def hrr_from_string():
        """
        Uses expression, names, query_vectors from wrapper function

        Args:

        """

        vocab = hrr.Vocabulary(D, unitary=unitary_names)

        for n, v in names.iteritems():
            vocab.add(n, v)

        try:
            h = eval(expression, {}, vocab)
        except Exception as e:
            print 'Error evaluating HRR string ' + original
            raise e

        if normalize:
            h.normalize()

        if not as_hrr:
            h = h.v

        return h
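The eval call above works because eval accepts any mapping as its locals
argument, and hrr.Vocabulary appears to act as a mapping that creates vectors
for names it has not seen. A minimal sketch of the same pattern with a
stand-in class (EvalVocab is hypothetical, not part of the hrr module):

import numpy as np


class EvalVocab(dict):
    """Stand-in for hrr.Vocabulary: unknown names become random unit vectors."""

    def __init__(self, D):
        dict.__init__(self)
        self.D = D

    def __missing__(self, name):
        v = np.random.randn(self.D)
        self[name] = v / np.linalg.norm(v)
        return self[name]


vocab = EvalVocab(64)
# ndarrays support '+' directly; the real Vocabulary returns HRR objects,
# which also overload '*' as circular convolution.
h = eval('A + B + C', {}, vocab)
print(sorted(vocab.keys()))  # ['A', 'B', 'C']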
Code example #2
import random

import hrr  # Nengo 1.4's HRR module


def hrr_noise(D, num):
    """Return a function that corrupts a D-dimensional vector: the input is
    bound to a random key, superimposed with ``num`` random bound key pairs,
    and then unbound again."""
    noise_vocab = hrr.Vocabulary(D)
    keys = [noise_vocab.parse(str(x)) for x in range(2 * num + 1)]

    def f(input_vec):

        input_vec = hrr.HRR(data=input_vec)
        partner_key = random.choice(keys)

        pair_keys = filter(lambda x: x != partner_key, keys)

        pairs = random.sample(pair_keys, 2 * num)
        p0 = (pairs[x] for x in range(0, len(pairs), 2))
        p1 = (pairs[x] for x in range(1, len(pairs), 2))
        S = map(lambda x, y: noise_vocab[x].convolve(noise_vocab[y]), p0, p1)

        S = reduce(lambda x, y: x + y, S,
                   noise_vocab[partner_key].convolve(input_vec))
        S.normalize()

        reconstruction = S.convolve(~noise_vocab[partner_key])
        reconstruction.normalize()

        return reconstruction.v

    return f
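A hedged usage sketch for the factory above (the dimensionality and the 'DOG'
vector are arbitrary choices, not from the original code):

D = 512
make_noisy = hrr_noise(D, num=2)

clean = hrr.Vocabulary(D).parse('DOG').v
noisy = make_noisy(clean)  # same shape, degraded by convolution crosstalk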
Code example #3
File: core.py Project: w6hu/nengo
    def ensure_vocab(self, name, module):
        d = module.get_param('dimensions')
        align = module.get_param('align_hrrs')
        # Cache one shared vocabulary per (dimensions, align) combination.
        if (d, align) not in self.default_vocabs:
            self.default_vocabs[(d, align)] = hrr.Vocabulary(
                d, randomize=not align, max_similarity=0.04)
        self.vocabs[name] = self.default_vocabs[(d, align)]
Code example #4
    def add_source(self, module, node, name=None, vocab=None):
        module_name = self.get_module_name(module)
        if name is None:
            name = module_name
        if vocab is None:
            dims = self.get_param_value('dimensions', module)
            rand = self.get_param_value('randomize_vectors', module)
            # Reuse a cached vocabulary for this (dimensions, randomize) pair.
            key = (dims, rand)
            vocab = self.vocabs.get(key, None)
            if vocab is None:
                vocab = hrr.Vocabulary(dims, randomize=rand)
                self.vocabs[key] = vocab
        alias = 'source_%s' % name
        self.net.set_alias(alias, '%s.%s' % (module_name, node))
        self.sources[name] = vocab
Code example #5
File: basalganglia_rule.py Project: w6hu/nengo
def make(net,
         node,
         index=0,
         dim=8,
         pattern='I',
         pstc=0.01,
         use_single_input=False):
    STN = node.getNode('STN')

    transform = numeric.zeros((STN.dimension, dim), 'f')

    if dim in hrr.Vocabulary.defaults:
        vocab = hrr.Vocabulary.defaults[dim]
    else:
        vocab = hrr.Vocabulary(dim)

    terms = [t.name for t in node.terminations]
    STNterms = [t.name for t in STN.terminations]

    count = 0
    while 'rule_%02d' % count in terms or 'rule_%02d' % count in STNterms:
        count = count + 1

    name = 'rule_%02d' % count

    transform[index, :] = vocab.parse(pattern).v

    if use_single_input:
        input = node.getNode('input')
        input.addDecodedTermination(name, transform, pstc, False)
        node.exposeTermination(input.getTermination(name), name)
    else:
        StrD1 = node.getNode('StrD1')
        StrD2 = node.getNode('StrD2')

        STN.addDecodedTermination(name, transform, pstc, False)
        node.exposeTermination(STN.getTermination(name), name + '_STN')
        StrD1.addDecodedTermination(name, transform * (0.8), pstc, False)
        node.exposeTermination(StrD1.getTermination(name), name + '_StrD1')
        StrD2.addDecodedTermination(name, transform * (1.2), pstc, False)
        node.exposeTermination(StrD2.getTermination(name), name + '_StrD2')
Code example #6
File: hrrgraph.py Project: mcchong/nengo_1.4
    def __init__(self, view, name, func, args=(), filter=True,
                 ylimits=(-1.0, 1.0), label=None, nodeid=None):
        self.fixed_value = None
        self.cache = {}
        self.cache_dt_tau = None
        self.cache_tick_count = 0

        graph.Graph.__init__(self, view, name, func, args=args,
                             filter=filter, ylimits=ylimits, split=False,
                             neuronmapped=False, label=label)

        self.border_top = 30

        # Use the vocabulary registered for this node, falling back to the
        # default vocabulary for its dimensionality, then to a fresh one.
        self.vocab = hrr.Vocabulary.registered.get(nodeid, None)
        if self.vocab is None:
            dim = len(self.data.get_first())
            if dim in hrr.Vocabulary.defaults:
                self.vocab = hrr.Vocabulary.defaults[dim]
            else:
                self.vocab = hrr.Vocabulary(dim)

        self.hidden_pairs = None

        self.normalize = True
        self.popup_normalize = JCheckBoxMenuItem(
            'normalize', self.normalize, stateChanged=self.toggle_normalize)
        self.popup.add(self.popup_normalize)

        self.smooth_normalize = False
        self.popup_smooth = JCheckBoxMenuItem(
            'smooth normalize', self.smooth_normalize,
            stateChanged=self.toggle_smooth_normalize)
        self.smooth_normalize_threshold = 0.4
        self.popup.add(self.popup_smooth)

        self.show_pairs = False
        self.popup_pairs = JCheckBoxMenuItem(
            'show pairs', self.show_pairs, stateChanged=self.toggle_show_pairs)
        self.popup.add(self.popup_pairs)

        self.show_graph = True
        self.popup_graph = JCheckBoxMenuItem(
            'show graph', self.show_graph, stateChanged=self.toggle_show_graph)
        self.popup.add(self.popup_graph)
        self.font_height_50 = None

        self.popup_set = JMenuItem('set value', actionPerformed=self.set_value)
        self.popup.add(self.popup_set)
        self.popup_release = JMenuItem('release value',
                                       actionPerformed=self.release_value)
        self.popup_release.setEnabled(False)
        self.popup.add(self.popup_release)
Code example #7
    def hrr_noise(input_vec):
        # Unlike code example #2, this version rebuilds the noise vocabulary
        # on every call and records the clean/noisy similarity in the
        # enclosing ``similarities`` list.
        noise_vocab = hrr.Vocabulary(D)
        keys = [noise_vocab.parse(str(x)) for x in range(2 * num + 1)]

        input_vec = hrr.HRR(data=input_vec)
        partner_key = random.choice(keys)

        pair_keys = filter(lambda x: x != partner_key, keys)

        pairs = random.sample(pair_keys, 2 * num)
        p0 = (pairs[x] for x in range(0, len(pairs), 2))
        p1 = (pairs[x] for x in range(1, len(pairs), 2))
        S = map(lambda x, y: noise_vocab[x].convolve(noise_vocab[y]), p0, p1)

        S = reduce(lambda x, y: x + y, S, noise_vocab[partner_key].convolve(input_vec))
        S.normalize()

        vec_hrr = S.convolve(~noise_vocab[partner_key])
        similarity = vec_hrr.compare(input_vec)
        similarities.append(similarity)
        return vec_hrr.v
Code example #8
    def hrr_noise_from_string(input_vec):
        """
        Uses expression, names, query_vectors from wrapper function

        Args:

        input_vec -- the vector to add noise to. Can be an HRR vector or a
                     numpy ndarry.  Returns a noisy vector of the same type
                     as input_vec.

        """

        use_ndarray = isinstance(input_vec, np.ndarray)
        if use_ndarray:
            input_vec = hrr.HRR(data=input_vec)

        vocab = hrr.Vocabulary(D, unitary=unitary_names)

        for n, v in names.iteritems():
            vocab.add(n, v)

        vocab.add(placeholder, input_vec)

        try:
            h = eval(expression, {}, vocab)
        except Exception as e:
            print 'Error evaluating HRR string ' + original
            raise e

        if normalize:
            h.normalize()

        vocab.add('h', h)

        noisy = eval('h * ~(' + ' * '.join(query_vectors) + ')', {}, vocab)

        if use_ndarray:
            noisy = noisy.v

        return noisy
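All three noise functions above rely on the same HRR round trip: binding by
circular convolution and unbinding with the pseudo-inverse only approximately
recovers the input. A self-contained numpy sketch of that mechanism (cconv
and involution are local stand-ins for HRR.convolve and the ~ operator):

import numpy as np


def cconv(a, b):
    # Circular convolution via FFT: the HRR binding operator.
    return np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))


def involution(a):
    # Pseudo-inverse used for unbinding: reverse all but the first element.
    return np.concatenate((a[:1], a[:0:-1]))


D = 512
rng = np.random.RandomState(0)
x = rng.randn(D) / np.sqrt(D)  # clean vector
q = rng.randn(D) / np.sqrt(D)  # query vector

noisy = cconv(cconv(x, q), involution(q))  # bind, then unbind
sim = np.dot(x, noisy) / (np.linalg.norm(x) * np.linalg.norm(noisy))
print(sim)  # close to, but below, 1.0: the round trip adds noise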
Code example #9
File: question-memory.py Project: travisfw/nengo
D = 20
N_input = 300
N_mem = 50
N_conv = 70

import nef
import nef.convolution
import hrr
import math
import random

vocab = hrr.Vocabulary(D, max_similarity=0.1)

net = nef.Network('Question Answering with Memory (pre-built)', seed=11)
net.make('A', N_input, D)
net.make('B', N_input, D)
net.make('C', N_input, D)
net.make('D', N_input, D)
net.make('E', N_input, D)

# This is the same as constructing the memory using the integrator template
# (400 neurons over 20 dimensions).
net.make_array(
    'Memory', N_mem, D, radius=1.0 / math.sqrt(D), storage_code='%d'
)

conv1 = nef.convolution.make_convolution(net, 'Bind', 'A', 'B', 'D', N_conv)
conv2 = nef.convolution.make_convolution(net, 'Unbind', 'Memory', 'C', 'E',
                                         N_conv, invert_second=True)
# invert_second=True is assumed here (the original excerpt ends mid-call);
# it inverts C so that E decodes approximately Memory convolved with ~C.
Code example #10
import nef.nef_theano as nef
import nef.convolution
import hrr

D = 10

vocab = hrr.Vocabulary(D, include_pairs=True)
vocab.parse('a+b+c+d+e')

net = nef.Network('Convolution')  # Create the network object

net.make('A', 300, D)  # Make a population of 300 neurons and 10 dimensions
net.make('B', 300, D)
net.make('C', 300, D)

# Construct a convolution network using the created populations and
# 100 neurons per dimension.
conv = nef.convolution.make_convolution(net, '*', 'A', 'B', 'C', 100)

net.run(1)  # run for 1 second
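What the '*' network computes: population C settles on approximately the
circular convolution of the decoded values of A and B, and include_pairs=True
stores pair vectors such as 'a*b' in the vocabulary so the result can be
recognized when plotting. A numpy sketch of the ideal output (vocab is the
vocabulary created above):

import numpy as np

va = vocab.parse('a').v
vb = vocab.parse('b').v
ideal_c = np.real(np.fft.ifft(np.fft.fft(va) * np.fft.fft(vb)))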
Code example #11
import hrr
import nef

dimensions = 256

vocab = hrr.Vocabulary(dimensions)  # create vectors

# this is gonna be provided by the vision module
# TODO: hack this into matrix and string creator

# TODO: fix the symbols of the start_matrix
start_matrix = [[1, 2, 3], [2, 3, 1], [3, 1, 2]]

symbols = ['BLANK', 'A', 'B', 'C']

# TODO: automatically create the following
rows = ['R1', 'R2', 'R3']
columns = ['C1', 'C2', 'C3']


def create_repr(matrix):
    m_repr = dict()
    # one entry per row: bind each column label to the symbol in that cell
    for i in range(len(matrix)):
        s = []
        for n, j in enumerate(matrix[i]):
            s.append(columns[n] + '*' + symbols[j])
        m_repr[rows[i]] = '+'.join(s)
    # one entry per column: bind each row label to the symbol in that cell
    for i in range(len(matrix[0])):
        s = []
        for n, j in enumerate([k[i] for k in matrix]):
            s.append(rows[n] + '*' + symbols[j])
        m_repr[columns[i]] = '+'.join(s)
    return m_repr
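A quick check of create_repr against the start_matrix above (the expected
strings follow from the row, column, and symbol lists):

m_repr = create_repr(start_matrix)
print(m_repr['R1'])  # C1*A+C2*B+C3*C
print(m_repr['C1'])  # R1*A+R2*B+R3*C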
Code example #12
File: question_control.py Project: mcchong/nengo_1.4
D = 100  # Number of dimensions
N = 30  # Neurons per dimension
import nef
import hrr

# import templates accessible from the drag-and-drop bar
import nef.templates.integrator as integrator
import nef.templates.binding as binding
import nef.templates.gate as gating
import nef.templates.basalganglia as bgtemplate
import nef.templates.basalganglia_rule as bg_rule

net = nef.Network('Question Answering with Control (pre-built)', seed=15)

# Define the vocabulary of vectors
vocab = hrr.Vocabulary(D)
vocab.parse('CIRCLE+BLUE+RED+SQUARE+QUESTION+STATEMENT')

# Input, output, and intermediate ensembles
visual = net.make_array('Visual', N, D)
channel = net.make_array('Channel', N, D)
net.make_array('Motor', N, D)

# Create the memory
integrator.make(net,
                name='Memory',
                neurons=N * D,
                dimensions=D,
                tau_feedback=0.4,
                tau_input=0.05,
                scale=1)
Code example #13
    def create(self, net, N=50, dimensions=8, randomize=False):
        vocab = {}
        for k in self.nodes.keys():
            node = net.get(k, None)
            if node is None:
                dim = dimensions
                if randomize is False and len(self.nodes[k]) + 1 > dim:
                    dim = len(self.nodes[k]) + 1
                node = net.make_array(k, N, dim)
            if id(node) not in hrr.Vocabulary.registered:
                v = hrr.Vocabulary(node.dimension, randomize=randomize)
                v.register(node)
            vocab[k] = hrr.Vocabulary.registered[id(node)]

        # ensure all terms are parsed before starting
        for k, v in self.connect.items():
            pre_name, post_name = k
            for pre_term, post_term in v:
                pre = vocab[pre_name].parse(pre_term).v
                post = vocab[post_name].parse(post_term).v

        for k, v in self.connect.items():
            pre_name, post_name = k

            # The transform accumulates an outer product for each pair,
            # mapping each pre term onto its post term.
            t = numeric.zeros(
                (vocab[post_name].dimensions, vocab[pre_name].dimensions),
                typecode='f')
            for pre_term, post_term in v:
                pre = vocab[pre_name].parse(pre_term).v
                post = vocab[post_name].parse(post_term).v
                t += numeric.array([pre * bb for bb in post])

            if pre_name == post_name:
                if pre_name in self.inhibit:
                    for pre_term in vocab[pre_name].keys:
                        pre = vocab[pre_name].parse(
                            pre_term).v * self.inhibit[pre_name]
                        post_value = numeric.zeros(vocab[post_name].dimensions,
                                                   typecode='f')
                        for post_term in vocab[pre_name].keys:
                            if pre_term != post_term:
                                post_value += vocab[post_name].parse(
                                    post_term).v
                        t += numeric.array([pre * bb for bb in post_value])
                if pre_name in self.excite:
                    t += numeric.eye(len(t)) * self.excite[pre_name]

            net.connect(net.get(pre_name), net.get(post_name), transform=t)

        for i, (pre, post) in enumerate(self.ands):
            D = len(pre)
            node = net.make('and%02d' % i, D * N, D)
            for j, p in enumerate(pre):
                t = numeric.zeros((D, vocab[p[0]].dimensions), typecode='f')
                t[j, :] = vocab[p[0]].parse(p[1]).v * math.sqrt(D)
                net.connect(net.get(p[0]), node, transform=t)

            def result(x, v=vocab[post[0]].parse(post[1]).v):
                for xx in x:
                    if xx < 0.4:  # TODO: this threshold is pretty arbitrary
                        return [0] * len(v)
                return v

            net.connect(node, net.get(post[0]), func=result)

        return net
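The transform accumulated in create above is a sum of outer products, so
feeding a pre vocabulary vector through the connection reproduces the paired
post vector (for approximately orthonormal vocabularies). A minimal numpy
illustration with stand-in vectors:

import numpy as np

D = 64
rng = np.random.RandomState(0)
pre = rng.randn(D)
pre /= np.linalg.norm(pre)
post = rng.randn(D)
post /= np.linalg.norm(post)

t = np.outer(post, pre)   # same as numeric.array([pre * bb for bb in post])
out = t.dot(pre)          # the connection applied to the pre vector
print(np.dot(out, post))  # ~1.0: pre is mapped onto post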
Code example #14
File: Tracker4.py Project: tcstewar/nengo_extract
    thal = spa2.Thalamus(bg)
    
    input = spa2.Input(0.01, vision='0*READY')
    input.next(0.1, vision='READY')
    input.next(0.15, vision='RIGHT')
    input.next(0.15, vision='ZIP')
    input.next(0.14, vision='BACK')
    input.next(0.15, vision='GO')
    input.next(10, vision='0*ZIP')
    
########### Main Network ##########

# Construct the network
net = nef.Network('Tracker', fixed_seed=3)

vocab = hrr.Vocabulary(D, max_similarity=0.1, include_pairs=False,
                       unitary=["ADD1", "P0"])
vocab.add('P1', vocab.parse('P0*ADD1'))
vocab.add('P2', vocab.parse('P1*ADD1'))

tracker = Tracker(net, vocab=vocab)

net.connect('memory.Serial memory', 'motor.Memory input')
net.connect('vision.Visual SP', 'memory.Vision input')
net.connect('position_gen.Current position', 'memory.Position input')
net.connect('position_gen.Current position', 'motor.Position input')

##Test inputs
net.make_array('Vision input', 1, D, mode='direct')
net.connect('Vision input', 'vision.Visual SP')

Code example #15
def make(net,
         node,
         index=0,
         dimensions=8,
         pattern='I',
         pstc=0.01,
         use_single_input=False):
    STN = node.getNode('STN')

    transform = numeric.zeros((STN.dimension, dimensions), 'f')

    if dimensions in hrr.Vocabulary.defaults:
        vocab = hrr.Vocabulary.defaults[dimensions]
    else:
        vocab = hrr.Vocabulary(dimensions)

    terms = [t.name for t in node.terminations]
    STNterms = [t.name for t in STN.terminations]

    count = 0
    while 'rule_%02d' % count in terms or 'rule_%02d' % count in STNterms:
        count = count + 1

    name = 'rule_%02d' % count

    transform[index, :] = vocab.parse(pattern).v

    if use_single_input:
        input = node.getNode('input')
        input.addDecodedTermination(name, transform, pstc, False)
        node.exposeTermination(input.getTermination(name), name)
    else:
        StrD1 = node.getNode('StrD1')
        StrD2 = node.getNode('StrD2')

        STN.addDecodedTermination(name, transform, pstc, False)
        node.exposeTermination(STN.getTermination(name), name + '_STN')
        StrD1.addDecodedTermination(name, transform * (0.8), pstc, False)
        node.exposeTermination(StrD1.getTermination(name), name + '_StrD1')
        StrD2.addDecodedTermination(name, transform * (1.2), pstc, False)
        node.exposeTermination(StrD2.getTermination(name), name + '_StrD2')

    if net.network.getMetaData("bgrule") == None:
        net.network.setMetaData("bgrule", HashMap())

    bgrules = net.network.getMetaData("bgrule")

    rule = HashMap(6)
    rule.put("name", node.getName())
    rule.put("index", index)
    rule.put("dimensions", dimensions)
    rule.put("pattern", pattern)
    rule.put("pstc", pstc)
    rule.put("use_single_input", use_single_input)

    bgrules.put(node.getName(), rule)

    if net.network.getMetaData("templates") == None:
        net.network.setMetaData("templates", ArrayList())
    templates = net.network.getMetaData("templates")
    templates.add(node.getName())
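The transform built by make has a single nonzero row: row index holds the HRR
for pattern, so the exposed termination computes the similarity between its
input and that rule's pattern. A stand-in numpy illustration (random vectors
rather than a real vocabulary):

import numpy as np

D, n_rules = 16, 4
rng = np.random.RandomState(1)
patterns = rng.randn(n_rules, D)
patterns /= np.linalg.norm(patterns, axis=1, keepdims=True)

transform = np.zeros((n_rules, D))
for index in range(n_rules):
    transform[index, :] = patterns[index]  # like vocab.parse(pattern).v

x = patterns[2] + 0.1 * rng.randn(D)  # noisy input matching rule 2
print(np.argmax(transform.dot(x)))    # -> 2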
Code example #16
def make_learnable_cleanup(D=16,
                           cleanup_neurons=1000,
                           num_vecs=4,
                           threshold=(-0.9, 0.9),
                           max_rate=(100, 200),
                           radius=1.0,
                           cleanup_pstc=0.001,
                           neurons_per_dim=50,
                           clean_learning=False,
                           trial_length=100,
                           learning_noise=0.6,
                           testing_noise=0.3,
                           user_control_learning=False,
                           user_control_bias=False,
                           learning_rate=5e-6,
                           schedule_func=None,
                           neural_input=False,
                           learning_bias=0.0,
                           testing_bias=0.0,
                           **kwargs):
    """
    Construct a cleanup memory that initially has no vocabulary, and learns its vocabulary from the vectors
    it receives as input. Also constructs an experiment node that tests the cleanup. Should probably separate
    that part out eventually.

    :param variable_bias: For using different thresholds during learning and testing. Implemented by a
    decoded-to-nondecoded connection from an ensemble to the cleanup population.

    :type variable_bias: boolean or tuple. If a tuple, first value will be used as threshold during learning,
    second will be used as threshold during testing. If True, user controls threshold (only works with a GUI). If False,
    threshold is fixed at whatever is determined by t_hi and t_lo.

    """

    logger = logging.getLogger("make_learnable_cleanup")
    logger.debug("cleanup_neurons: %s", cleanup_neurons)

    net = nef.Network('learn_cleanup', seed=2)

    vocab = hrr.Vocabulary(D)

    # Define the termination handler at runtime so that its ``dimensions``
    # default matches D.
    func_str = ("def tr(self, x, dimensions=%d, pstc=0.02):\n"
                "   self.results = x\n"
                "   self.trial_error.append(self.correct_vector - self.results)"
                % D)
    exec func_str in locals()
    ExperimentController.termination_results = tr

    controller = ExperimentController(
        'EC',
        vocab,
        num_vecs,
        clean_learning,
        learning_noise,
        testing_noise,
        learning_bias,
        testing_bias,
        trial_length,
        schedule_func=schedule_func,
        user_control_learning=user_control_learning)

    net.add(controller)

    logger.info("Adding cleanup")

    net.make('cleanup',
             neurons=cleanup_neurons,
             dimensions=D,
             radius=radius,
             intercept=threshold,
             max_rate=max_rate,
             tau_ref=0.004)

    if user_control_bias:
        logger.info("Adding bias controlled by user")
        make_bias(net,
                  'bias',
                  'cleanup',
                  bias=1.0,
                  neurons=1,
                  pstc=cleanup_pstc,
                  direct=True)
        net.make_input('bias_input', [0])
        net.connect('bias_input', 'bias')
    else:
        logger.info("Adding bias controlled by EC")
        make_bias(net,
                  'bias',
                  'cleanup',
                  bias=1.0,
                  neurons=1,
                  pstc=cleanup_pstc,
                  direct=True)
        net.connect(controller.getOrigin('bias'), 'bias')

    logger.info("Adding output")
    net.make('output',
             neurons=neurons_per_dim * D,
             dimensions=D,
             mode='default')

    logger.info("Adding input")
    if neural_input:
        net.make_array('input',
                       neurons=neurons_per_dim,
                       length=D,
                       dimensions=1,
                       mode='default')
    else:
        net.make('input', neurons=1, dimensions=D, mode='direct')

    net.connect(controller.getOrigin('input_vecs'), 'input')
    net.connect('input', 'cleanup', pstc=cleanup_pstc)

    logger.info("Adding error population and learning")
    learning.make(net,
                  errName='error',
                  N_err=neurons_per_dim * D,
                  preName='cleanup',
                  postName='output',
                  rate=learning_rate)
    net.connect(controller.getOrigin('learning_vecs'), 'error', pstc=0.01)

    logger.info("Adding learning gate")
    gating.make(net, name='Gate', gated='error', neurons=40, pstc=0.01)

    net.connect('output', controller.getTermination('results'))

    if user_control_learning:
        logger.info("Adding learning-control switch")
        net.make_input('switch', [1.0])
        net.connect('switch', controller.getTermination('learning_on'))

    net.connect(controller.getOrigin('learning_control'), 'Gate')

    logger.info("Adding network to nengo")
    net.add_to_nengo()

    #if show_stats:
    #    encoders = net.get('cleanup').getEncoders()

    #    sims=[[] for name in names]
    #    hrrs = [vocab.parse(name) for name in names]

    #    for enc in encoders:
    #        h = hrr.HRR(data=enc)

    #        for v, s in zip(hrrs,sims):
    #            s.append(v.compare(h))

    #        sims.append(s)

    #    for v,s, in zip(hrrs,sims):
    #        print "lo"
    #        print len(filter(lambda x: x > t_lo, s))
    #        print "hi"
    #        print len(filter(lambda x: x > t_hi, s))

    #    print sims

    return net
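A hedged usage sketch (the keyword values are just the defaults shown above;
since make_learnable_cleanup already calls add_to_nengo, constructing the
network is enough to load the model):

net = make_learnable_cleanup(D=16, cleanup_neurons=1000, num_vecs=4,
                             user_control_learning=True)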
Code example #17
File: digit.py Project: ctn-archive/tang-2012
import random

import hrr
import numeric


def read(filename):
    # Assumed header: the excerpt begins inside this function, which reads a
    # CSV file into a list of float rows.
    data = []
    for line in open(filename):
        row = [float(x) for x in line.strip().split(',')]
        data.append(row)
    return data


m_inputs = read('mat_test_x.csv')   # the visual stimuli
m_encode = read('mat_encode.csv')   # the exact RBM output values for those stimuli
m_inputsy = read('mat_test_y.csv')  # the category of each stimulus (0-9)

# present the digits in a random order
order = list(range(len(m_inputs)))
random.shuffle(order)

# compute the semantic pointers for each digit by averaging the encoded values for each category
vocab = hrr.Vocabulary(50)
for i, label in enumerate([
        'ZERO', 'ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT',
        'NINE'
]):
    v = numeric.array([0] * 50, typecode='f')
    count = 0
    for j, yy in enumerate(m_inputsy):
        if yy[0] == i:
            v += m_encode[j]
            count += 1
    sp = hrr.HRR(data=v / count)
    sp.normalize()
    vocab.add(label, sp)
    print label, sp.v
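The loop above builds one prototype semantic pointer per digit class by
averaging that class's RBM encodings and normalizing. The same computation in
plain numpy (reusing m_encode and m_inputsy loaded above):

import numpy as np

encodings = np.asarray(m_encode)                  # (n_samples, 50)
labels = np.asarray(m_inputsy)[:, 0].astype(int)  # digit class per sample

prototypes = np.array([encodings[labels == i].mean(axis=0)
                       for i in range(10)])
prototypes /= np.linalg.norm(prototypes, axis=1,
                             keepdims=True)       # like sp.normalize()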
Code example #18
intercepts = [0.4]
percent_shown = 1

pre_n = 500
post_n = 5

trial_length = 2
plot_connection_weights = 0
train_on_hrr = argvals.r

times = []
times.extend([trial_length * 3 for i in range(post_n)])
times.extend([trial_length * 3 for i in range(post_n)])
sim_length = sum(times)

vocab = hrr.Vocabulary(dim, max_similarity=0.05)
post_encoders = []
for i in range(post_n):
    post_encoders.append(vocab.parse("x" + str(i)).v)

post_encoders = np.array(post_encoders)

gens = []
if train_on_hrr:
    gens.extend([
        nf.output(100, False, pe, False, nf.make_hrr_noise(dim, 1))
        for pe in post_encoders
    ])
else:
    gens.extend([nf.output(100, True, pe, False) for pe in post_encoders])
gens.extend([
Code example #19
    def __init__(self, net, productions, dimensions, neurons_buffer=40,
                 neurons_bg=40, neurons_product=300, subdimensions=None,
                 bg_radius=1.5, tau_gaba=0.008, tau_ampa=0.002, noise=None,
                 vocab=None, quick=True, bg_output_weight=-3,
                 bg_same_neurons=True, align_hrr=False,
                 direct_convolution=False, direct_buffer=False,
                 direct_gate=False, direct_same=False, buffer_mode='rate'):
        if vocab is None:
            # Reuse the registered default vocabulary when its randomize
            # setting is consistent with align_hrr; otherwise build a new one.
            if (dimensions in hrr.Vocabulary.defaults and
                    hrr.Vocabulary.defaults[dimensions].randomize != align_hrr):
                vocab = hrr.Vocabulary.defaults[dimensions]
            else:
                vocab = hrr.Vocabulary(dimensions, randomize=not align_hrr)
        self.vocab = vocab
        self.net = net
        self.production_count = len(productions.productions)
        self.dimensions = dimensions
        
        self.direct_convolution = direct_convolution
        self.direct_buffer = direct_buffer
        self.direct_gate = direct_gate
        self.direct_same = direct_same

        D = len(productions.productions)
        bias = net.make_input('prod_bias', [1])
        prod = net.make_array('prod', neurons_bg, D, intercept=(0.2, 1),
                              encoders=[[1]], quick=quick)
        net.connect(bias, prod)

        input = []
        transform = []
        for k in productions.get_buffers():
            if self.direct_buffer is True or (
                    isinstance(self.direct_buffer, list)
                    and k in self.direct_buffer):
                buffer = net.make('buffer_' + k, 1, dimensions, quick=True,
                                  mode='direct')
            else:
                if subdimensions is not None:
                    buffer = net.make_array(
                        'buffer_' + k, neurons_buffer * subdimensions,
                        dimensions / subdimensions, dimensions=subdimensions,
                        quick=quick, mode=buffer_mode)
                else:
                    buffer = net.make('buffer_' + k,
                                      neurons_buffer * dimensions, dimensions,
                                      quick=quick, mode=buffer_mode)
            input.append(buffer)
            transform.append(productions.calc_input_transform(k, vocab))

        for k in productions.get_same_buffers():
            a, b = k.split('_sameas_', 1)
            if self.direct_same:
                dp = net.make('dp_%s_%s' % (a, b), 1, 1, quick=quick,
                              mode='direct')
            else:
                dp = net.make('dp_%s_%s' % (a, b), neurons_buffer, 1,
                              quick=quick)
            transform.append(productions.calc_input_same_transform(k, vocab))
            input.append(dp)

            
        basalganglia.make_basal_ganglia(
            net, input, prod, D, neurons=neurons_bg,
            input_transform=transform, output_weight=bg_output_weight,
            noise=noise, radius=bg_radius, same_neurons=bg_same_neurons)
        
        for k in productions.get_same_buffers():
            a, b = k.split('_sameas_', 1)
            if self.direct_same:
                same = net.make_array('same_%s_%s' % (a, b), 1, dimensions,
                                      dimensions=2, quick=quick,
                                      mode='direct')
            else:
                same = net.make_array(
                    'same_%s_%s' % (a, b), neurons_product * 2, dimensions,
                    dimensions=2, quick=quick,
                    encoders=[[1, 1], [1, -1], [-1, -1], [-1, 1]])

            # Interleave the two buffers so that each 2-D sub-ensemble sees
            # the i-th component of buffer a and of buffer b.
            t1 = []
            t2 = []
            for i in range(dimensions):
                m1 = numeric.zeros((2, dimensions), typecode='f')
                m2 = numeric.zeros((2, dimensions), typecode='f')
                m1[0, i] = 1.0
                m2[1, i] = 1.0
                for row in m1:
                    t1.append(row)
                for row in m2:
                    t2.append(row)

            net.connect('buffer_' + a, same, transform=t1, pstc=tau_ampa)
            net.connect('buffer_' + b, same, transform=t2, pstc=tau_ampa)

            # Dot product: multiply componentwise in each sub-ensemble, then
            # sum the products via the output transform.
            def product(x):
                return x[0] * x[1]

            net.connect(same, 'dp_%s_%s' % (a, b), func=product,
                        transform=[[1] * dimensions], pstc=tau_ampa)

        for k in productions.get_direct_actions():
            if self.direct_buffer:
                net.make('thal_' + k, 1, dimensions, quick=True,
                         mode='direct')
            else:
                net.make('thal_' + k, neurons_buffer * dimensions, dimensions,
                         quick=quick)
            net.connect('thal_' + k, 'buffer_' + k, pstc=tau_ampa)
            net.connect(prod, 'thal_' + k,
                        transform=productions.calc_output_transform(k, vocab),
                        pstc=tau_ampa)

        for k in productions.get_transform_actions():
            a, b = k.split('_to_', 1)
            name = 'thal_%s_%s' % (a, b)
            net.make(name, neurons_buffer * dimensions, dimensions,
                     quick=quick)
            net.connect(prod, name,
                        transform=productions.calc_output_transform(k, vocab),
                        pstc=tau_ampa)
            conv = nef.convolution.make_convolution(
                net, k, name, 'buffer_' + a, 'buffer_' + b, 1, quick=True,
                mode='direct')

        for k in productions.get_gate_actions():
            a, b = k.split('_to_', 1)

            if self.direct_gate:
                c = DirectChannel('channel_%s_to_%s' % (a, b), dimensions,
                                  pstc_gate=tau_gaba, pstc_input=tau_ampa)
                net.add(c)
                net.connect('buffer_' + a, c.getTermination('input'))
                net.connect(c.getOrigin('X'), 'buffer_' + b, pstc=tau_ampa)
            else:
                c = net.make('channel_%s_to_%s' % (a, b),
                             neurons_buffer * dimensions, dimensions,
                             quick=quick)
                net.connect('buffer_' + a, c, pstc=tau_ampa)
                net.connect(c, 'buffer_' + b, pstc=tau_ampa)
                c.addTermination('gate',
                                 [[-10.0]] * (neurons_buffer * dimensions),
                                 tau_gaba, False)

            name = 'gate_%s_%s' % (a, b)
            net.make(name, neurons_buffer, 1, quick=quick, encoders=[[1]],
                     intercept=(0.3, 1))
            net.connect('prod', name,
                        transform=productions.calc_output_gates(k, vocab),
                        pstc=tau_ampa)
            net.connect(bias, name)
            net.connect(name, c.getTermination('gate'))

        for k in productions.get_gate_deconv_actions():
            a, c = k.split('_to_', 1)
            a, b = a.split('_deconv_', 1)

            if self.direct_convolution:
                conv = nef.convolution.make_convolution(
                    net, '%s_deconv_%s_to_%s' % (a, b, c), 'buffer_' + a,
                    'buffer_' + b, 'buffer_' + c, 1, quick=True,
                    invert_second=True, mode='direct', pstc_in=tau_ampa,
                    pstc_out=tau_ampa, pstc_gate=tau_gaba)
            else:
                conv = nef.convolution.make_convolution(
                    net, '%s_deconv_%s_to_%s' % (a, b, c), 'buffer_' + a,
                    'buffer_' + b, 'buffer_' + c, neurons_product,
                    quick=quick, invert_second=True, pstc_in=tau_ampa,
                    pstc_out=tau_ampa)
                conv.addTermination('gate',
                                    [[[-100.0]] * neurons_product] *
                                    conv.dimension, tau_gaba, False)

            name = 'gate_%s_%s_%s' % (a, b, c)
            net.make(name, neurons_buffer, 1, quick=quick, encoders=[[1]],
                     intercept=(0.3, 1))
            net.connect('prod', name,
                        transform=productions.calc_output_gates(k, vocab),
                        pstc=tau_ampa)
            net.connect(bias, name)
            net.connect(name, conv.getTermination('gate'))