Example #1
    def __init__(self, **kwargs):
        """
        Set up equation parameters in the network and load the data to
        construct the semantic layer.
        For a human-readable format of equations refer to the paper.

        Input
        -----
            nr_words:    maximal allowed number of guesses, default 3
            stim_len:       length of stimulus and winner activity, default 50
        """
        # maximal number of nodes visited in the first layer
        self.nr_words = kwargs.get('nr_words', 4)

        # stimulus length and the duration of activity for a WTA unit
        self.stim_len = kwargs.get('stim_len', 50)

        # weight pruning
        self.theta = kwargs.get('theta', 0)
        print('Theta %.3f' % self.theta)
        print('Words %d' % self.nr_words)

        # connection matrix with associative strengths
        self.W, self.ids, self.voc = load_vocabulary()

        # zero out weights at or below the threshold
        if self.theta > 0:
            print('Removing connections...')
            self.W = np.where(self.W > self.theta, self.W, 0)

        # number of units
        self.N = len(self.W)

        assert self.N == len(self.ids) == len(self.voc)

        self.rho_w = 1 - 0.995  # integration constant for the WTA layer

        self.c1 = 1.  # excitatory connection
        self.c2 = 1.  # normalization constant
        self.c3 = 1.  # inhibitory neuron constant
        self.c4 = 50.  # inhibitory layer constant
        self.c5 = 0.1  # noise amplitude

        self.noise_offset = 0.05 / self.c5

        self.theta_w = 1.  # threshold for the WTA layer
        self.I_amp = 1.  # amplitude of the clamped input
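
A minimal usage sketch, assuming this `__init__` belongs to the `Network` class instantiated in Example #2 (the parameter values here are illustrative, not from the source):

# Hedged sketch: assumes load_vocabulary() is available at import time.
net = Network(nr_words=4, stim_len=50, theta=0.05)  # prune weights <= 0.05
print(net.N)  # number of units in the semantic layer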
Example #2
def simulate_test(**kwargs):
    # Load the problem set
    net = Network(**kwargs)

    path_test = os.path.join(os.path.dirname(__file__), os.pardir, 'data',
            'processed', 'rat_items')
    items = np.loadtxt(path_test, dtype=str)
    W, ids, voc = load_vocabulary()

    nr_items = len(items)
    positions = np.zeros(nr_items, dtype=int)

    all_responses = []

    for idx in range(nr_items):
        rat_item = items[idx]
        cues, target = rat_item[:3], rat_item[3]
        target_id = voc[target]

        net.setup_problem(cues, target)

        # if the WTA fails to pick a winner, try again
        done = False
        while not done:
            try:
                net.run()
                done = True
            except Exception:
                print('WTA failed, retrying the simulation')

        responses = list(net.visited())
        all_responses.append(responses)

        try:
            # position of the solution
            target_position = responses.index(target_id)
        except ValueError:
            # problem not solved
            target_position = -1

        print(idx, rat_item, target_position)

        positions[idx] = target_position

    return np.array(positions, dtype=int), all_responses
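
A short usage sketch for this driver, assuming the data file and `Network` are importable; a position of -1 marks an unsolved item:

# Hedged sketch: nr_words and stim_len are forwarded to Network via kwargs.
positions, all_responses = simulate_test(nr_words=4, stim_len=50)
solved = positions > -1
print('%d of %d items solved' % (solved.sum(), len(positions)))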
Example #3
def simulate_test(**kwargs):
    # Load the problem set
    path_test = '../data/processed/rat_items'
    items = np.loadtxt(path_test, dtype=str)

    # Model parameters
    theta = kwargs.get('theta', 0)
    nr_words = kwargs.get('nr_words', 13)

    W, ids, voc = load_vocabulary()

    nr_items = len(items)
    positions = np.zeros(nr_items, dtype=int)

    all_responses = []

    for idx in range(nr_items):
        rat_item = items[idx]
        nodes = [voc[word] for word in rat_item]

        target = voc[rat_item[3]]

        activations, responses = spread_activity(init_nodes=nodes[:3],
                                                 target=target,
                                                 W=W,
                                                 max_visited=nr_words,
                                                 threshold=theta)
        all_responses.append(responses)

        # response position
        try:
            target_position = responses.index(target)
        except ValueError:
            target_position = -1

        # print(idx, rat_item, target_position)

        positions[idx] = target_position

    return np.array(positions, dtype=int), all_responses
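
The returned positions make it easy to inspect at which guess the target appeared; a small sketch, assuming the defaults above:

# Hedged sketch: histogram of the guess index at which each item was solved.
positions, _ = simulate_test(theta=0.05, nr_words=13)
for pos in range(13):
    print('solved at guess %d: %d items' % (pos + 1, np.sum(positions == pos)))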
Example #4

(Only the tail of spread_activity survives in this snippet; its signature can be inferred from the calls in the other examples.)

        if j > -1:
            visited.append(j)

        if j == target:
            break

        j = WTA(activations, visited, j, activated)

        counter += 1

    assert len(visited) == max_visited or j == target

    return activations, visited
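
The WTA helper itself is not shown in these snippets. A minimal winner-take-all sketch, under the assumption that `activated` is a collection of activated node indices and that the function returns the most active node not yet visited; the previous winner `j` is an assumption about the real signature, and the actual implementation may differ:

def WTA(activations, visited, j, activated):
    # Hypothetical reconstruction: pick the most active node that is in the
    # activated set but has not been visited; j (the previous winner) is kept
    # only for signature compatibility.
    candidates = [k for k in activated if k not in visited]
    if not candidates:
        raise RuntimeError('WTA failed to pick a winner')
    return max(candidates, key=lambda k: activations[k])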

if __name__ == "__main__":
    W, ids, voc = load_vocabulary()

    cues = ['river', 'note', 'account']
    target = 'bank'

    cues_id = [voc[cue] for cue in cues]

    activations, order = spread_activity(init_nodes=cues_id,
                                         target=voc[target],
                                         W=W,
                                         threshold=.0,
                                         max_visited=10)
    for i, ord_idx in enumerate(order):
        print(i + 1, ids[ord_idx], activations[ord_idx])
Example #5

(Only the tail of this evaluation-and-plotting script survives in the snippet.)
        solutions = np.where(positions < nr_w, positions, -1)

        performance[i, :3] = get_difficulties(solutions)
        percent_correct = 100 * np.count_nonzero(solutions > -1) / nr_items
        performance[i, 3] = percent_correct

    return performance
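
get_difficulties is not defined in this snippet. A plausible sketch, assuming a hypothetical item_difficulty array that labels each RAT item 0 (easy), 1 (mid), or 2 (hard), and that the function returns the percentage solved per class:

def get_difficulties(solutions):
    # Hypothetical reconstruction: item_difficulty is an assumed global array
    # labeling each problem 0 (easy), 1 (mid), or 2 (hard).
    out = np.zeros(3)
    for d in (0, 1, 2):
        mask = item_difficulty == d
        out[d] = 100.0 * np.count_nonzero(solutions[mask] > -1) / mask.sum()
    return out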


if __name__ == "__main__":
    font = {'family': 'serif', 'serif': 'Times New Roman', 'size': 28}
    legend_fs = 24
    matplotlib.rc('font', **font)

    # Association data, needed for statistics below
    W, ids, voc = load_vocabulary()
    weights = W[W.nonzero()]

    # problem difficulty labeling
    difficulties = [0, 1, 2]
    labs = ['easy', 'mid', 'hard']

    lw = 4
    colors = ['#4D4D4D', '#808080', '#CCCCCC', '#000000']
    alphas = [.4, .4, .6]
    ymin, ymax = -2, 105

    fig = pl.figure(figsize=(22, 6), dpi=80, facecolor="white")

    #   ----------------- Vary number of words -----------------
    axes = pl.subplot(121)