# Example 1
def curvature_freq_test():
    """Curvature/frequency round-trip test on a synthetic curve.

    Re-indexes the curvature of a frequency-based curve by accumulated
    angle, reconstructs the trajectory from the (angle, curvature)
    parameterization, then perturbs one FFT coefficient of the
    log-curvature and reconstructs again to visualize the effect of the
    corruption in the frequency domain.
    """
    crv = get_curvature_frequency_based_curve(3.0 / 2.0)

    # get curvature indexed by accumulated angle
    curvature, ang_sections = get_ang_indexed_curvature_of_t_indexed_curve(
        crv, interp_kind='linear')
    ang = np.linspace(0.0, 16 * np.pi, len(curvature[0]))

    ###
    ### debug
    ###
    curvature_t = get_curvature_of_t_indexed_curve(crv)
    ang_t = get_continuous_ang(crv)

    plt.ion()

    fig = plt.figure()
    ax = fig.add_subplot(111)
    # ax.plot(np.arange(len(ang_t)), ang_t, '-*')
    # ax.plot(np.arange(len(curvature_t)), curvature_t)
    ax.plot(ang, np.log(curvature[0]), 'r', linewidth=3.5)
    # Axes.hold() was deprecated in matplotlib 2.0 and removed in 3.0;
    # overlaying subsequent plots on the same axes is the default
    # behavior, so the former ax.hold(True) call is unnecessary.
    plt.draw()
    # Pause so the debug plot can be inspected.
    # (raw_input() is Python 2 only; input() is the Python 3 equivalent.)
    input()
    ###
    ###
    ###
    crv_reconstruct = get_trajectory_from_ang_curvature_parameterization(
        ang, curvature[0], dt=0.005)

    ax_original = utils.display_data([[crv]])
    ax_reconstruct = utils.display_data([[crv_reconstruct]])

    # see the frequency analysis and a reconstruction with perturbation
    # in the frequency domain
    freq_bins = fftfreq(len(ang), ang[1] - ang[0]) * 2 * np.pi
    log_curvature_freq = get_curvature_fft_transform(np.log(curvature[0]))

    ax_freq = display_frequency(sp=log_curvature_freq, freq=freq_bins)

    # corrupt one frequency basis coefficient with Gaussian noise
    log_curvature_freq[2] += np.random.randn() * 0.2
    ax_freq.plot(freq_bins, log_curvature_freq)
    # reconstruct from the corrupted frequency representation
    corrupt_log_curvature = get_curvature_inv_fft_transform(log_curvature_freq)
    ax.plot(ang, corrupt_log_curvature)

    # curvature was log-transformed before the FFT; undo it here
    curvature_recons = np.exp(corrupt_log_curvature)
    crv_corrupt_reconstruct = get_trajectory_from_ang_curvature_parameterization(
        ang[0:200], curvature_recons[0:200], dt=0.005)
    ax_corrupt_reconstruct = utils.display_data([[crv_corrupt_reconstruct]])

    return
def curvature_freq_test():
    """Curvature/frequency round-trip test on a synthetic curve.

    NOTE(review): this is a near-verbatim duplicate of an earlier
    curvature_freq_test definition in this file — when both are loaded
    this one shadows the first; consider removing one copy.
    """
    crv = get_curvature_frequency_based_curve(3.0/2.0)

    # get curvature indexed by accumulated angle
    curvature, ang_sections = get_ang_indexed_curvature_of_t_indexed_curve(crv, interp_kind='linear')
    ang = np.linspace(0.0, 16*np.pi, len(curvature[0]))

    ###
    ### debug
    ###
    curvature_t = get_curvature_of_t_indexed_curve(crv)
    ang_t = get_continuous_ang(crv)

    plt.ion()

    fig = plt.figure()
    ax = fig.add_subplot(111)
    # ax.plot(np.arange(len(ang_t)), ang_t, '-*')
    # ax.plot(np.arange(len(curvature_t)), curvature_t)
    ax.plot(ang, np.log(curvature[0]), 'r', linewidth=3.5)
    # Axes.hold() was removed in matplotlib 3.0; overlaying is the
    # default behavior, so the former ax.hold(True) call is unnecessary.
    plt.draw()
    # raw_input() is Python 2 only; input() is the Python 3 equivalent.
    input()
    ###
    ###
    ###
    crv_reconstruct = get_trajectory_from_ang_curvature_parameterization(ang, curvature[0], dt=0.005)

    ax_original = utils.display_data([[crv]])
    ax_reconstruct = utils.display_data([[crv_reconstruct]])

    #see the frequency analysis and a reconstruction with perturbation in frequency domain
    freq_bins = fftfreq(len(ang), ang[1]-ang[0])*2*np.pi
    log_curvature_freq = get_curvature_fft_transform(np.log(curvature[0]))

    ax_freq = display_frequency(sp=log_curvature_freq, freq=freq_bins)

    #corrupt frequency basis coefficients with Gaussian noise
    log_curvature_freq[2] += np.random.randn() * 0.2
    ax_freq.plot(freq_bins, log_curvature_freq)
    #reconstruct from the corrupted frequency
    corrupt_log_curvature = get_curvature_inv_fft_transform(log_curvature_freq)
    ax.plot(ang, corrupt_log_curvature)

    # undo the log transform applied before the FFT
    curvature_recons = np.exp(corrupt_log_curvature)
    crv_corrupt_reconstruct = get_trajectory_from_ang_curvature_parameterization(ang[0:200], curvature_recons[0:200], dt=0.005)
    ax_corrupt_reconstruct = utils.display_data([[crv_corrupt_reconstruct]])

    return
import numpy as np
from scipy.io import loadmat
from utils import display_data, one_vs_all, predict_ova

# Handwritten-digit training set in MATLAB format: X holds one flattened
# image per row, y the corresponding labels.
data = loadmat("data/ex3data1.mat")
X = data['X']
y = data['y']

# Show a random sample of 100 training examples.
m, n = X.shape
indices = np.random.permutation(m)
data_points = X[indices[:100], :]

display_data(data_points)

# Fit one regularized logistic-regression classifier per class
# (one-vs-all) with regularization strength lambd.
num_labels = 10
lambd = 0.1
all_theta = one_vs_all(X, y, num_labels, lambd)

# Report accuracy of the learned classifiers on the training set.
pred = predict_ova(all_theta, X)

print(f'Train Accuracy: {np.mean(pred == y) * 100:.1f}%')
# Example 4
# ============= Visualizing data ============== #

print('Loading data...')

# ex4data1.mat supplies the handwritten-digit examples: X holds one
# flattened image per row, y the labels.
data = loadmat('data/ex4/ex4data1.mat')

X = data['X']
Y = data['y']

# Pick 100 distinct rows at random for display.
m = X.shape[0]
idx_array = np.arange(m)
rand_indxs = np.random.choice(idx_array, size=100, replace=False)

print('Plotting example digits...')
display_data(X[rand_indxs, :])

input('Press enter to continue...')

# ============= Loading NN parameters ============= #

print('Loading Neural Network weights...')

weights = loadmat('data/ex4/ex4weights.mat')

# Unroll each weight matrix into a column vector using column-major
# ('F', Fortran/MATLAB) order — presumably matching the order the
# weights were saved in; confirm against the consumer of nn_params.
W1 = weights['Theta1'].flatten(order='F')[:, np.newaxis]
W2 = weights['Theta2'].flatten(order='F')[:, np.newaxis]

# Stack both unrolled layers into a single parameter vector.
nn_params = np.vstack((W1, W2))
# Example 5
# Load the SVHN training split as tensors.
transform = transforms.ToTensor()

svhn_train = datasets.SVHN(root='data/',
                           split='train',
                           download=True,
                           transform=transform)

batch_size = 128
num_workers = 0

# FIX: num_workers was hard-coded as the literal 0 here even though the
# num_workers variable is defined above; pass the variable so changing
# it actually takes effect (behavior is identical while it is 0).
train_loader = DataLoader(dataset=svhn_train,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=num_workers)

display_data(train_loader)

# Model sizes — presumably conv_size is the base feature-map width and
# z_size the generator's latent dimension; confirm against the
# Discriminator/Generator definitions.
conv_size = 32
z_size = 100

D = Discriminator(conv_size)
G = Generator(z_size, conv_size)

# Move both networks to the GPU when one is available.
cuda = False

if torch.cuda.is_available():
    cuda = True
    D = D.cuda()
    G = G.cuda()

lr = 0.0002
from utils import randInitializeWeights, Unroll_weights, Train_network
from utils import forward_prop, Mean_classification_error, cross_entropy_loss
from utils import Tanh, TanhGradient, sigmoidGradient, sigmoid, ReLu, ReLuGradient, Load_data, display_data
import matplotlib.pyplot as plt
import numpy as np

if __name__ == "__main__":
    # ---------------Load & Visualize the training data------------------------
    # Each file presumably holds one flattened digit example per line —
    # confirm against Load_data.
    train_file_path = './digitstrain.txt'
    val_file_path = './digitsvalid.txt'
    test_file_path = './digitstest.txt'
    X_train, Y_train = Load_data(train_file_path)
    X_val, Y_val = Load_data(val_file_path)
    X_test, Y_test = Load_data(test_file_path)
    display_data(X_train)

    #-------------------------------Network Architecture-----------------------
    input_layer_size = 784  # 28x28 pixels per digit image
    hidden_layer_size = [100]  # List Sizes of the hidden layer
    n_hidden = 1  # number of hidden layers (must match len(hidden_layer_size))
    num_labels = 10  # one output per digit class 0-9

    # -------------------------Set Activation Function---------------------------
    activ_func = Tanh  # can be sigmoid, ReLu, Tanh
    activ_Grad_func = TanhGradient  # can be sigmoidGradient, ReLuGradient, TanhGradient

    #----------------------------- Hyper Parameters -----------------------
    # NOTE(review): these are presumably consumed by a training call later
    # in the file (Train_network is imported but not used in this chunk).
    epochmax = 50
    LearningRate = 0.01
    reg_lambda = 0.001
    momentum = 0.1
# Example 7
# ========== Loading and Visualizing data ========== #

print('Loading data...')

# ex3data1.mat supplies the handwritten-digit examples: X holds one
# flattened image per row, y the labels.
data = loadmat('data/ex3/ex3data1.mat')

X = data['X']
Y = data['y']

# Pick 100 distinct rows at random for display.
m = X.shape[0]
idx_array = np.arange(m)
rand_indxs = np.random.choice(idx_array, size=100, replace=False)

print('Plotting example digits...')
display_data(X[rand_indxs, :])

input('Press enter to continue...')

# ============= Loading weights for the NN ============= #

# Pre-trained weight matrices for the two network layers.
weights = loadmat('data/ex3/ex3weights.mat')
w1 = weights['Theta1']
w2 = weights['Theta2']

# ============= Making Predictions ============== #
nn = NeuralNetwork(X, Y, w1, w2)

preds = nn.predict()
# Shift predictions from 0-based indices to 1-based labels — presumably
# to match the MATLAB-style labels in Y; confirm against NeuralNetwork.
preds += 1