Example #1
        return valid_loss

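    # Run hyperloss once with record_results=True so the final train/valid losses are stored in `results`.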
    hyperloss(transform_parser.vect, 0, record_results=True)
    return results['train_loss'][-1], results['valid_loss'][-1]


def plot():
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    with open('results.pkl', 'rb') as f:
        # results.pkl holds a list of (train_loss, valid_loss) pairs; transpose into two series.
        train_loss, valid_loss = zip(*pickle.load(f))

    fig = plt.figure(0)
    fig.set_size_inches((6, 4))
    ax = fig.add_subplot(111)
    ax.set_title('Performance vs weight_sharing')
    ax.plot(all_script_corr, train_loss, 'o-', label='train_loss')
    ax.plot(all_script_corr, valid_loss, 'o-', label='valid_loss')
    ax.set_xlabel('Weight sharing')
    ax.set_ylabel('Negative log prob')
    ax.legend(loc=1, frameon=False)
    plt.savefig('performance.png')


all_script_corr = np.linspace(0, 1, 10)
if __name__ == '__main__':
    results = omap(run, all_script_corr)
    with open('results.pkl', 'wb') as f:  # binary mode for pickle
        pickle.dump(results, f, 1)
    plot()
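
For context, `omap` is used above as an ordered map over the sweep values. A minimal, hypothetical sketch of the same save/load round trip (the `run` and `omap` below are illustrative stand-ins, not the repo's implementations):

import pickle
import numpy as np

def run(weight_sharing):
    # Stand-in for the real experiment: return a (train_loss, valid_loss) pair.
    return float(weight_sharing), float(weight_sharing) ** 2

def omap(f, xs):
    # Stand-in for the repo's omap: an ordered map that preserves input order.
    return [f(x) for x in xs]

all_script_corr = np.linspace(0, 1, 10)
results = omap(run, all_script_corr)
with open('results.pkl', 'wb') as f:
    pickle.dump(results, f, 1)
with open('results.pkl', 'rb') as f:
    train_loss, valid_loss = zip(*pickle.load(f))  # transpose pairs into two series
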
Example #2
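    # Each record in `data` is a (loss, d_loss) pair; transposing yields one d_loss curve per entry of all_N_iters.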
    d_losses = zip(*[d_loss for loss, d_loss in data])

    fig = plt.figure(0)
    fig.clf()
    fig.set_size_inches((8,8))
    ax = fig.add_subplot(211)
    for loss_curve, N_iter in zip(losses, all_N_iters):
        ax.plot(all_log_alpha_0, loss_curve, 'o-', label="{0} iters".format(N_iter))
    ax.set_title("Loss vs step size")
    ax.set_ylim([0, 3])
    ax.set_ylabel("Negative log loss per datum")
    ax.set_xlabel(xlabel)
    # ax.legend(loc=4)
    ax = fig.add_subplot(212)
    for d_loss_curve, N_iter in zip(d_losses, all_N_iters):
        ax.plot(all_log_alpha_0, d_loss_curve, 'o-', label="{0} iters".format(N_iter))
    ax.set_title("Grad loss vs step size")
    ax.set_ylim([-0.5, 0.5])
    ax.set_ylabel("Negative log loss per datum")
    ax.set_xlabel(xlabel)
    ax.legend(loc=2)
    plt.savefig("/tmp/fig.png")
    plt.savefig("fig.png")

if __name__ == '__main__':
    results = omap(run, range(N_oiter))
    # results = collect_results(225866836343)
    with open('results.pkl', 'wb') as f:  # binary mode for pickle
        pickle.dump(results, f)
    plot()
Example #3
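            # Record the full training-set loss at the final parameters z_vect_final.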
            results['train_loss'].append(total_loss(train_data, z_vect_final))
            # results['tests_loss'].append(total_loss(tests_data, z_vect_final))
        return valid_loss

    hyperloss(transform_parser.vect, 0, record_results=True)
    return results['train_loss'][-1], results['valid_loss'][-1]

def plot():
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    with open('results.pkl', 'rb') as f:
        train_loss, valid_loss = zip(*pickle.load(f))

    fig = plt.figure(0)
    fig.set_size_inches((6,4))
    ax = fig.add_subplot(111)
    ax.set_title('Performance vs weight_sharing')
    ax.plot(all_script_corr, train_loss, 'o-', label='train_loss')
    ax.plot(all_script_corr, valid_loss, 'o-', label='valid_loss')
    ax.set_xlabel('Weight sharing')
    ax.set_ylabel('Negative log prob')
    ax.legend(loc=1, frameon=False)
    plt.savefig('performance.png')

all_script_corr = np.linspace(0, 1, 10)
if __name__ == '__main__':
    results = omap(run, all_script_corr)
    with open('results.pkl', 'wb') as f:  # binary mode for pickle
        pickle.dump(results, f, 1)
    plot()
Example #4
def check_omap():
    # This won't work with nosetests. Needs to be run from the same directory as the file.
    ans = omap(identity, objects)
    for x, y in zip(ans, objects):
        assert x == y, "Failed on {0}".format(y)
    print "test ok"
Example #5
        ax.plot(all_log_alpha_0,
                loss_curve,
                'o-',
                label="{0} iters".format(N_iter))
    ax.set_title("Loss vs step size")
    ax.set_ylim([0, 3])
    ax.set_ylabel("Negative log loss per datum")
    ax.set_xlabel(xlabel)
    # ax.legend(loc=4)
    ax = fig.add_subplot(212)
    for d_loss_curve, N_iter in zip(d_losses, all_N_iters):
        ax.plot(all_log_alpha_0,
                d_loss_curve,
                'o-',
                label="{0} iters".format(N_iter))
    ax.set_title("Grad loss vs step size")
    ax.set_ylim([-0.5, 0.5])
    ax.set_ylabel("Negative log loss per datum")
    ax.set_xlabel(xlabel)
    ax.legend(loc=2)
    plt.savefig("/tmp/fig.png")
    plt.savefig("fig.png")


if __name__ == '__main__':
    results = omap(run, range(N_oiter))
    # results = collect_results(225866836343)
    with open('results.pkl', 'wb') as f:  # binary mode for pickle
        pickle.dump(results, f)
    plot()