Example #1
import matplotlib.pyplot as plt
# visualize_grid is assumed to be the CS231n assignment's helper
from cs231n.vis_utils import visualize_grid

def show_net_weights(nn):
    # Visualize the first-layer weights of the network as a grid of
    # 32x32 RGB tiles, one tile per hidden unit
    W1 = nn.params['W1']
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
    plt.gca().axis('off')
    plt.show()
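
A minimal usage sketch, assuming a CS231n-style TwoLayerNet whose params['W1'] has shape (3072, hidden_size); the class name and training step are illustrative:

net = TwoLayerNet(input_size=32 * 32 * 3, hidden_size=50, output_size=10)
# ... train net on CIFAR-10 ...
show_net_weights(net)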
Example #2
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

def display(dataset, i=0):
    # Show the i-th image of an MNIST-style (images, labels) pair;
    # each image is stored as a flat vector of 784 pixels
    image = np.reshape(dataset[0][i], (28, 28))
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    imgplot = ax.imshow(image, cmap=mpl.cm.Greys)
    imgplot.set_interpolation('nearest')
    plt.show()
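
A minimal usage sketch, assuming the dataset is an MNIST-style (images, labels) tuple with images stored as flat 784-pixel rows; the arrays here are synthetic stand-ins:

fake_images = np.random.rand(10, 784)  # stand-ins for real MNIST images
fake_labels = np.arange(10)
display((fake_images, fake_labels), i=3)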
Example #3

def plot_histograms_detailed(df, symbol):
    # Histogram of daily returns for one symbol, annotated with the
    # mean and one standard deviation on either side of it
    daily_returns = compute_daily_returns(df)
    daily_returns[symbol].hist(bins=20, label=symbol)
    plt.legend(loc="upper right")
    mean = daily_returns[symbol].mean()
    print("mean:", mean)
    std = daily_returns[symbol].std()
    print("std:", std)
    plt.axvline(mean, color='w', linestyle='dashed', linewidth=2)
    plt.axvline(mean + std, color='r', linestyle='dashed', linewidth=2)
    plt.axvline(mean - std, color='r', linestyle='dashed', linewidth=2)
    plt.show()
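
Both histogram helpers call compute_daily_returns, which is not shown. A minimal sketch of what it presumably computes (day-over-day percentage change, with the first row zeroed so the result has the same shape as the input):

def compute_daily_returns(df):
    daily_returns = (df / df.shift(1)) - 1  # (price[t] / price[t-1]) - 1
    daily_returns.iloc[0, :] = 0  # the first day has no previous price
    return daily_returns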
Example #4
import numpy as np
import matplotlib.pyplot as plt

def plot(data, labels, net):
    # Plot the decision boundary of a 2-D classifier by evaluating it
    # on a dense grid and drawing filled contours of the predictions
    x_min, x_max = data[:, 0].min() - 0.5, data[:, 0].max() + 0.5
    y_min, y_max = data[:, 1].min() - 0.5, data[:, 1].max() + 0.5
    h = 0.01  # grid step

    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    Z = net.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(data[:, 0], data[:, 1], c=labels, s=10, cmap=plt.cm.Spectral)

    plt.show()
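
A minimal usage sketch, assuming net can be any object with a predict method mapping an (N, 2) array to N class labels; the k-nearest-neighbors classifier below is just a stand-in:

from sklearn.neighbors import KNeighborsClassifier

data = np.random.randn(200, 2)
labels = (data[:, 0] * data[:, 1] > 0).astype(int)  # toy XOR-style labels
net = KNeighborsClassifier(n_neighbors=5).fit(data, labels)
plot(data, labels, net)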
Example #5

def plot_histograms(df, symbol):
    # Plain histogram of daily returns for one symbol
    daily_returns = compute_daily_returns(df)
    daily_returns[symbol].hist(bins=20, label=symbol)
    plt.legend(loc="upper right")
    plt.show()
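
A minimal usage sketch, assuming df is a pandas DataFrame of prices with one column per ticker; the price series here is synthetic:

import numpy as np
import pandas as pd

dates = pd.date_range('2020-01-01', periods=250)
df = pd.DataFrame({'SPY': 100 + np.random.randn(250).cumsum()}, index=dates)
plot_histograms(df, 'SPY')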
Example #6
import numpy as np
import matplotlib.pyplot as plt

# load_data, softmax_loss_vectorized, Softmax and grad_check_sparse are
# assumed to come from the surrounding CS231n-style assignment code
X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = load_data(
    cifar_dir, num_test=500)

# initialize W with small random values (3072 pixels + 1 bias, 10 classes)
W = np.random.randn(3073, 10) * 0.0001

# sanity-check the loss: with small random weights and no regularization,
# it should be close to -log(0.1) for 10 classes
loss, grad = softmax_loss_vectorized(W, X_dev, y_dev, 0.0)
print('loss: %f' % loss)
print('sanity check: %f' % (-np.log(0.1)))

# numerical gradient check without regularization (note: f must evaluate
# the loss at the perturbed point w, not at the original W)
#def f(w): return softmax_loss_vectorized(w, X_dev, y_dev, 0.0)[0]
#grad_numerical = grad_check_sparse(f, W, grad, 10)

# numerical gradient check with regularization
#def f(w): return softmax_loss_vectorized(w, X_dev, y_dev, 1e2)[0]
#grad_numerical = grad_check_sparse(f, W, grad, 10)

# train a softmax classifier with SGD and plot the loss curve
softmax = Softmax()
loss_history = softmax.train(X_train,
                             y_train,
                             learning_rate=1e-7,
                             reg=5e4,
                             num_iters=1500,
                             verbose=True)
plt.plot(loss_history)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
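
As a follow-up, a minimal sketch of evaluating the trained classifier, assuming Softmax exposes a predict method (as the CS231n LinearClassifier does):

y_val_pred = softmax.predict(X_val)
print('validation accuracy: %f' % np.mean(y_val_pred == y_val))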