def quantitative_plot(patterns, bias=None):
    FLIPPED = 30
    x = []
    y = []
    for n in range(1, len(patterns) + 1):
        x.append(n)
        considered_patterns = patterns[:n]
        if bias:
            weights = biasedLearn(considered_patterns)
        else:
            weights = utils.learn(considered_patterns)
        recovered = 0
        for p in considered_patterns:
            for trial in range(10):
                noisy = utils.flipper(p, FLIPPED)
                for _ in range(10 * len(p)):
                    if bias:
                        biasedUpdateOne(weights, noisy, bias)
                    else:
                        utils.updateOne(weights, noisy)
                    if utils.samePattern(noisy, p):
                        break
                if utils.samePattern(noisy, p):
                    break  # recovered on this trial; no need for further trials

            if utils.samePattern(noisy, p):
                recovered += 1
        y.append(recovered)

    plt.plot(x, y)
    plt.title("Evolution of capacity with the number of patterns")
    plt.show()
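
# A minimal usage sketch (assumption: utils.rndPattern returns a random
# +/-1 pattern of the given length, as used in the other examples; the
# pattern count, length, and bias value are illustrative).
def demo_quantitative_plot():
    patterns = [utils.rndPattern(100) for _ in range(20)]
    quantitative_plot(patterns)            # unbiased Hebbian learning
    quantitative_plot(patterns, bias=0.5)  # biased variant; needs biasedLearn/biasedUpdateOne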
def main():
    print('Loading dataset...')
    # columns: exam 1 score, exam 2 score, whether admitted (0 or 1)
    frame = pd.read_csv('ex2data1.csv', header=None)
    data = frame.values
    x_mat = data[:, 0:2]  # exam scores
    y = data[:, 2:3]  # admitted or not

    # normalize the input (large raw scores saturate the sigmoid at 0 or 1)
    x_mean = np.mean(x_mat, axis=0)
    x_std = np.std(x_mat, axis=0)
    x_norm = (x_mat - x_mean) / x_std

    # add intercept
    x_norm = np.insert(x_norm, 0, 1, axis=1)

    # Learn model
    print('starting to learn...')
    (loss, reg_loss, theta) = utils.learn(x_norm, y, 5000, 0.1)
    print('Final loss %s' % loss[-1])
    print('Final theta \n%s' % theta)

    # predict for student
    joe = np.array([[45, 85]])
    joe_norm = (joe - x_mean) / x_std
    joe_norm = np.insert(joe_norm, 0, 1, axis=1)
    p = utils.sigmoid(joe_norm.dot(theta))
    print('Student with grades %s and %s has admission probability: %s' % (45, 85, p[0, 0]))

    # Predict on train set
    prediction = (utils.sigmoid(x_norm.dot(theta)) >= 0.5)
    actual = (y == 1)
    predict_success = np.sum(prediction == actual)
    print('Model evaluation on training set has success of %s/%s' % (predict_success, y.shape[0]))

    # calc decision boundary
    # The decision boundary is the threshold line that separates true/false predictions,
    # this means that on this line the prediction is exactly 0.5, meaning:
    # p = sigmoid(x_mat.dot(theta)) = 0.5 ====> x_mat.dot(theta) = 0
    # so our line equation is: theta0 + theta1*x1 + theta2*x2 = 0
    # x2 = -theta0 / theta2 - (theta1/theta2)*x1
    theta = theta.flatten()

    # calc 2 points on the line
    plot_x = np.array([np.min(x_norm[:, 1]), np.max(x_norm[:, 1])])
    plot_y = -1 * (theta[0] / theta[2]) - (theta[1] / theta[2]) * plot_x

    # denormalize the points
    plot_x = plot_x * x_std[0] + x_mean[0]
    plot_y = plot_y * x_std[1] + x_mean[1]

    plot_data(x_mat, y, plot_x, plot_y)
    utils.plot_loss(loss)

    plt.show()
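
# For reference, a minimal sketch of what utils.learn might do in this example
# (batch gradient descent on the logistic loss). This is an assumption about
# its internals; the real utils.learn also returns a regularized-loss history,
# omitted here.
def learn_sketch(x, y, iterations, alpha):
    theta = np.zeros((x.shape[1], 1))
    loss = []
    for _ in range(iterations):
        h = 1.0 / (1.0 + np.exp(-x.dot(theta)))       # sigmoid hypothesis
        loss.append(float(np.mean(-y * np.log(h) - (1 - y) * np.log(1 - h))))
        theta -= alpha * x.T.dot(h - y) / x.shape[0]  # gradient step
    return loss, theta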
def small_patterns():
    patterns = [x1, x2, x3]
    weights = utils.learn(patterns)

    # Testing that the patterns are "fixpoints"
    for i, pattern in enumerate(patterns):
        updated_pattern = utils.update(weights, pattern)
        if utils.samePattern(pattern, updated_pattern):
            print "* Pattern #{} is a fixpoint, as expected.".format(i + 1)
    print

    # Test if the network will recall stored patterns from distorted versions
    NUM_TRIALS = 100
    for n in xrange(1, 4):
        print "# Recovering from {} flip(s):".format(n)
        for i, pattern in enumerate(patterns):
            success = 0
            for _ in xrange(NUM_TRIALS):
                distorted_pattern = utils.flipper(pattern, n)
                for j in xrange(500):
                    utils.updateOne(weights, distorted_pattern)

                if utils.samePattern(pattern, distorted_pattern):
                    success += 1
            print "  - Pattern #{}: {}/{} recoveries were succesful.".format(
                i + 1, success, NUM_TRIALS)
        print

    # Finding unexpected attractors
    attractors = set()
    for i in xrange(1000):
        pattern = utils.rndPattern(len(patterns[0]))
        for _ in xrange(100):
            utils.updateOne(weights, pattern)
        if not any(np.all(pattern == p) for p in patterns):
            attractors.add(tuple(pattern.tolist()))

    print "# Unexpected attractors:"
    print '\n'.join(map(str, attractors))
    print

    print "'Small patterns' experiment succesfull!"
def run(x_norm, y, x_mean, x_std, _lambda):
    # Learn model
    print('starting to learn with lambda=%s...' % _lambda)
    (loss, reg_loss, theta) = utils.learn(x_norm, y, 5000, 0.1, _lambda)
    print('Final loss %s' % loss[-1])
    print('Final theta \n%s' % theta)

    utils.plot_loss(loss, reg_loss, 'lambda=' + str(_lambda))

    # Create the decision boundary: evaluate the model on a 50x50 grid of points ranging from
    # -1 to 1.5 on both axes (the "z" fed to the sigmoid); the decision boundary is the contour
    # where these values change sign from negative to positive (akin to edge detection)
    print('Visualizing decision boundary')
    u = np.linspace(-1, 1.5, 50)
    v = np.linspace(-1, 1.5, 50)
    plane = np.zeros((u.size, v.size))
    for i in range(u.size):
        for j in range(v.size):
            feats = utils.map_features(u[i:i + 1], v[j:j + 1], 6, False)
            feats_norm = (feats - x_mean) / x_std
            feats_norm = np.insert(feats_norm, 0, 1, axis=1)
            plane[i, j] = feats_norm.dot(theta)

    return plane
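
# A usage sketch for run(): the zero-level contour of the returned plane is the
# decision boundary described above. The grid bounds mirror the linspace inside
# run(); plot_boundary is a hypothetical helper, not part of the original code.
def plot_boundary(plane):
    u = np.linspace(-1, 1.5, 50)
    v = np.linspace(-1, 1.5, 50)
    # plane[i, j] was evaluated at (u[i], v[j]), so transpose to match
    # contour's (rows = y-axis) convention
    plt.contour(u, v, plane.T, levels=[0])
    plt.show()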
def learn_chords(chord_names, play_func):
    learn(chord_names, play_func)
# model = ppo2.Model(policy=policy_network,
#                        ob_space=env.gym.observation_space,
#                        ac_space=env.gym.action_space,
#                        nbatch_act=nenvs,
#                        nbatch_train=nbatch_train,
#                        nsteps=nsteps,
#                        ent_coef=0.01,
#                        vf_coef=vf_coef,
#                        max_grad_norm=max_grad_norm)

num_timesteps = 1000000

learn(policy=policy_network, env=env, nsteps=128, nminibatches=32,
        lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
        ent_coef=.01,
        lr=lambda f: f * 2.5e-4,
        cliprange=lambda f: f * 0.1,
        total_timesteps=int(num_timesteps * 1.1))
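
# Note: in OpenAI Baselines' ppo2, lr and cliprange accept either a constant or
# a callable of the remaining-progress fraction f (annealed from 1 toward 0),
# so the lambdas above decay the learning rate and clip range linearly.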

# policy = model.act_model
# # Run Episodes
# for i_episode in range(EPISODES):
#     observation = env.reset()
#     for t in range(MAX_EPISODE_LENGTH):
#         actions = [policy.step(obs) for obs in observation]
#         actions = [action[0] for action in actions]
#         # print(actions)
#         observation, reward, done = env.step(actions)
#         if done[env.gym.training_agent]:
#             print("Episode finished after {} timesteps".format(t+1))
#             break
def learn_chords(play_func):
    learn(CHORD_NAMES, play_func)
def restoring_images():
    patterns = [figs.p1, figs.p2, figs.p3]
    weights = utils.learn(patterns)

    print "! Pattern recovery ( 1 & 2 )"
    for i, (original, noisy) in enumerate([(figs.p1, figs.p11),
                                           (figs.p2, figs.p22)]):
        noisy = np.array(noisy)
        show_pattern(noisy, title="Noisy pattern #{}".format(i + 1))
        for _ in xrange(10000):
            utils.updateOne(weights, noisy)
        show_pattern(noisy, title="Recovered pattern #{}".format(i + 1))
        if utils.samePattern(noisy, original):
            print "  . Correctly recovered pattern {}".format(i + 1)
        else:
            print "  . Couldn't recover pattern {}".format(i + 1)
    print
    #sequential_hopfield(weights, figs.p22, figs.p2, num_iter=3000, display=300)

    # Testing recovering distorted patterns
    pattern = figs.p1
    attractors = set()
    print "! Pattern recovery with varying distortion:"
    for n in xrange(1, len(pattern) - 1, 10):
        print "  * n = {}/{}".format(n, len(pattern))
        for trial in xrange(10):
            noisy = utils.flipper(pattern, n)
            for l in xrange(20000):
                utils.updateOne(weights, noisy)
                if l % 1000 == 0 and utils.samePattern(pattern, noisy):
                    break
            attractors.add(tuple(noisy.tolist()))
            if utils.samePattern(pattern, noisy):
                break

        if utils.samePattern(pattern, noisy):
            print "   . Correctly recovered the pattern (on at least one of the trials)"
        else:
            print "   * Couldn't recover the pattern, stopping."
            break

    # Energy at the different attractors
    x = Counter()
    for attr in attractors:
        energy = utils.energy(weights, np.array(attr))
        x[energy] += 1
    plt.plot(list(x.keys()), list(x.values()), 'b.')
    plt.title("Energy at different attractors")
    plt.show()

    # Studying the change of energy at each iteration
    noisy = utils.flipper(figs.p1, 40)
    num_iterations = 5000
    iterations = range(num_iterations)
    energies = []
    for iteration in iterations:
        energies.append(utils.energy(weights, noisy))
        utils.updateOne(weights, noisy)
    plt.plot(iterations, energies, '-b')
    plt.title("Evolution of the energy at each iteration")
    plt.show()
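
# For reference, the Hopfield energy that utils.energy presumably computes is
# E(x) = -1/2 * x^T W x; a sketch under that assumption.
def energy_sketch(weights, pattern):
    return -0.5 * pattern.dot(weights).dot(pattern)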
def capacity_benchmarks(patterns,
                        force_recovery=False,
                        updates=200,
                        ntrials=10,
                        bias=(0,),
                        plot=False):
    if force_recovery:
        print "! Capacity benchmarks: pattern_length={} updates={}, attempts={}".format(
            len(patterns[0]),
            len(patterns[0]) * 10, ntrials)
    else:
        print "! Capacity benchmarks: pattern_length={}".format(
            len(patterns[0]))

    for b in bias:
        if b != 0:
            print "=> BENCHMARKS: bias={}".format(b)
        # Increasing pattern memory
        for i in range(1, len(patterns) + 1):
            recovery_failure = [0] * i
            if b == 0:
                weights = utils.learn(patterns[:i])
            else:
                weights = biasedLearn(patterns[:i])
            nmin = len(patterns[0]) + 1
            pmin = None

            # Applying benchmark on each pattern stored
            for p in range(i):
                pattern = patterns[p]
                recovered = False

                # Increasing pattern noise
                for n in range(1, len(patterns[0]) + 1):
                    if not force_recovery:
                        # Random noise
                        noisy_pattern = utils.flipper(pattern, n)
                        # Pattern recovery
                        noisy_pattern = utils.update(weights, noisy_pattern)

                        if not utils.samePattern(pattern, noisy_pattern):
                            recovery_failure[p] = n
                            break

                    else:
                        recovered = False

                        # Multiple attempts on failure
                        for t in range(ntrials):
                            # Random noise
                            noisy_pattern = utils.flipper(pattern, n)

                            # Pattern recovery
                            for j in range(len(patterns[0]) * 10):
                                if b == 0:
                                    utils.updateOne(weights, noisy_pattern)
                                else:
                                    biasedUpdateOne(weights, noisy_pattern, b)

                            if utils.samePattern(pattern, noisy_pattern):
                                recovered = True
                                break
                            else:
                                if n < nmin:
                                    nmin = n
                                    pmin = p + 1
                                recovery_failure[p] = n

                        if not recovered:
                            break

            if force_recovery:
                print((
                    "{} stored - All patterns recovered until {} (p{} failed) - Last failure at {} by p{}\n"
                    "First attempt failed by p{} at {}\nDetails: {}").format(
                        i, min(recovery_failure),
                        recovery_failure.index(min(recovery_failure)),
                        max(recovery_failure),
                        recovery_failure.index(max(recovery_failure)),
                        pmin, nmin, recovery_failure))
            else:
                print("{} stored - All patterns recovered until {} (p{} failed) - Last failure at {} by p{}\nDetails: {}".format(
                    i, min(recovery_failure),
                    recovery_failure.index(min(recovery_failure)),
                    max(recovery_failure),
                    recovery_failure.index(max(recovery_failure)),
                    recovery_failure))
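
# A minimal usage sketch (same utils.rndPattern assumption as above; the sizes
# and bias values are illustrative, and the nonzero bias path requires
# biasedLearn/biasedUpdateOne to be defined).
def demo_capacity_benchmarks():
    patterns = [utils.rndPattern(100) for _ in range(10)]
    capacity_benchmarks(patterns)                      # recovery via one utils.update pass
    capacity_benchmarks(patterns, force_recovery=True,
                        ntrials=5, bias=(0, 0.5))      # iterated recovery, unbiased and biased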
def learn_intervals(play_func):
    learn(INTERVAL_NAMES, play_func)