Example #1
def visualize_correctness(n=25, working_dir=None):
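    """Plot actual vs. expected reservoir output for a stored run.

    Loads the pickled dataset, RBN reservoir and readout layer from
    working_dir, runs the input through an mdp.Flow of [reservoir, readout],
    thresholds the outputs at 0.5, computes the accuracy, and plots the
    first n actual and expected outputs to 'temp-2.pdf'.
    """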
    if not working_dir:
        working_dir = get_working_dir()

    (reservoir_input, expected_output), _ =\
        glob_load(working_dir + '*-dataset')[0]
    rbn_reservoir, _ = glob_load(working_dir + '*-reservoir')[0]
    readout, _ = glob_load(working_dir + '*-readout')[0]

    rbn_reservoir.reset_state()
    flow = mdp.Flow([rbn_reservoir, readout], verbose=1)

    actual_output = flow.execute(reservoir_input)
    for output in actual_output:
        output[0] = 1 if output[0] > 0.5 else 0

    errors = sum(actual_output != expected_output)
    accuracy = 1 - float(errors) / len(actual_output)

    plt.title('Reservoir performance')
    plt.plot(actual_output[:n], 'y', linewidth=1.5)
    plt.plot(expected_output[:n], 'b', linewidth=1.5)
    plt.legend(['Actual output', 'Expected output'])

    plt.savefig('temp-2.pdf', bbox_inches='tight')
Example #3
def load_rbns_from_erb(directory, remember):
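    """Build pgfplots data strings from a pickled accuracy/RBN distribution.

    Returns a (scatterplot, boxplot) pair of strings: the scatterplot pairs
    each RBN's measured computational capability with its accuracy, and the
    boxplot groups accuracies by the RBNs' input connectivity.
    """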
    rbn_distribution, name = glob_load(directory + '*distribution')[0]

    scatterplot = ["x y"]

    for accuracy, rbn in rbn_distribution:
        cc = measure_computational_capability(rbn, 100, remember)
        scatterplot.append("{} {}".format(cc, accuracy))

    distribution = {}
    distplot = ["\\myboxplot{"]
    for accuracy, rbn in rbn_distribution:
        ic = rbn.input_connectivity
        if ic not in distribution:
            distribution[ic] = []

        distribution[ic].append(accuracy)

    for i, l in enumerate(sorted(distribution.keys())):
        distplot.append("% L: {}".format(i))
        distplot.append("\\addplot[boxplot]")
        distplot.append("table[row sep=\\\\, y index=0] {")
        distplot.append("data")
        for fitness in distribution[l]:
            distplot.append("{} \\\\".format(fitness))

        distplot.append("};")

    distplot.append("}}{{{}}}".format(10.0 / max(distribution.keys())))

    return '\n'.join(scatterplot), '\n'.join(distplot)
Example #4
def load_rbns_from_ea():
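    """Load the best genomes from pickled EA runs in the working directory.

    Extracts the best genome of each '*-evolved' run and converts it to an
    RBN phenotype via genotype_to_phenotype; returns both the genomes and
    the resulting RBNs.
    """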
    working_dir = get_working_dir()
    ea_runs = map(fst, glob_load(working_dir + '*-evolved'))

    best_genomes = map(lst, ea_runs)
    rbns = [genotype_to_phenotype(genome, 100, 2) for genome in best_genomes]

    return best_genomes, rbns
Example #6
def visualize_rbn_state(n=100, working_dir=None):
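    """Plot the state matrix of a stored RBN reservoir over n time steps.

    Loads the pickled '*-reservoir' RBN, drives it either with input from
    the '*-dataset' in working_dir or, if the user declines perturbation,
    with zero input and perturbation disabled, and saves a matshow plot of
    the node states under a user-supplied filename.
    """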
    if not working_dir:
        working_dir = get_working_dir()

    rbn, rbn_name = glob_load(working_dir + '*-reservoir')[0]
    rbn.reset_state()

    if not user_denies('Perturb?'):
        test_data, _ = glob_load(working_dir + '*-dataset')[0]
        test_input, _ = test_data
        test_input = test_input[:n]
    else:
        test_input = np.zeros((n, 1))
        rbn.should_perturb = False

    rbn_states = rbn._execute(test_input)

    plt.matshow(rbn_states, cmap=plt.cm.gray)
    plt.axis('off')
    #plt.xlabel('State of node n in RBN')
    #plt.gca().xaxis.set_label_position('top')
    #plt.ylabel('Time')

    plt.savefig(raw_input('Name: '), bbox_inches='tight')
Example #7
def visualize_dataset(n=30, working_dir=None):
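    """Plot the input and expected output of a stored dataset.

    Loads the pickled '*-dataset', extracts the dataset description from
    the filename, and saves matshow plots of the first n input and output
    samples as '<description>-input.pdf' and '<description>-output.pdf'
    in plots/.
    """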
    if not working_dir:
        working_dir = get_working_dir()

    test_dataset, filename = glob_load(working_dir + '*-dataset')[0]

    dataset_meta = re.search(r"\[(.*)\]", filename).groups()[0]

    reservoir_input = np.transpose(test_dataset[0][:n])
    expected_output = np.transpose(test_dataset[1][:n])

    plt.matshow(reservoir_input, cmap=plt.cm.gray)
    plt.axis('off')
    plt.savefig('plots/' + dataset_meta + '-input.pdf', bbox_inches='tight')

    plt.matshow(expected_output, cmap=plt.cm.gray)
    plt.axis('off')
    plt.savefig('plots/' + dataset_meta + '-output.pdf', bbox_inches='tight')

    plt.show()
Example #9
#    upper whisker={},
#    lower whisker={}
#}},
#] coordinates {{}};""".format(l, i, median, upperq, lowerq, upperw, lowerw)
#
#        print boxplot
#
#    print "}}{{{}}}".format(10.0 / max(distribution.keys()))

if __name__ == '__main__':
    #filename = "pickle_dumps/distribution-100-5-3/combined-distribution"
    #computational_power_scatter()
    #distribution_to_plot()
    #erb()

    rbn, _ = glob_load('pickle_dumps/70input-2/*-reservoir')[0]
    ccs = [measure_computational_capability(rbn, 100, 3) for _ in range(20)]
    print ccs, np.median(ccs), np.mean(ccs)

    #postfix = default_input('Postfix?', '-3-1')
    #remember = int(postfix.split("-")[1]) - 1

    #scatter, box = load_rbns_from_erb('pickle_dumps/c-distribution-100' + postfix + '/', remember)

    #print scatter

    #sc = open('pickle_dumps/c-distribution-100' + postfix +
    #          '/computational-power-100' + postfix + '.dat', 'w')
    #sc.write(scatter)
    #sc.close()
Example #11
                                     input_connectivity=input_connectivity)

    return rbn_reservoir


if __name__ == '__main__':
    # Set pickle working dir
    working_dir = get_working_dir()

    log.setup(logging.DEBUG, path=working_dir)
    log_git_info()

    # Create datasets
    use_existing_dataset = user_confirms('Use existing dataset in folder?')
    if use_existing_dataset:
        test_dataset, _ = glob_load(working_dir + '*-dataset')[0]
        dataset_description = '[dataset_from_folder]'
    else:
        datasets, dataset_description = create_dataset()
        training_dataset, test_dataset = datasets[:-1], datasets[-1]

    if not use_existing_dataset and not user_denies('Pickle test dataset?'):
        dump(test_dataset,
             dataset_description + '-dataset',
             folder=working_dir)

    # Create or load reservoir and readout layer
    if user_confirms('Use readout layer from folder?'):
        readout, _ = glob_load(working_dir + '*readout')[0]
    else:
        rbn_reservoir = create_reservoir()