Example #1
def test_nn_checkgrad():
    import nn
    import config as cfg

    config = cfg.Config('../784.cfg')

    config.num_epochs = 10

    net = nn.NN()
    net.init_net(config)
    net.display()
    net.train()
    net.check_grad()
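For context, a gradient check of this kind typically compares backprop gradients against central finite differences. A minimal standalone sketch, assuming a scalar loss(w) callable (none of these names come from the nn module):

import numpy as np

def numeric_grad(loss, w, eps=1e-5):
    # central differences: (loss(w + eps) - loss(w - eps)) / (2 * eps), per coordinate
    g = np.zeros_like(w)
    for i in range(w.size):
        w_plus, w_minus = w.copy(), w.copy()
        w_plus.flat[i] += eps
        w_minus.flat[i] -= eps
        g.flat[i] = (loss(w_plus) - loss(w_minus)) / (2 * eps)
    return g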
Example #2
    def __init__(self, q_lr, discount_factor, net_lr=0.01):
        # We ought to use softmax here
        # Trying the [64, 256, 256, 128, 64] layer architecture next
        self.policy_net = nn.NN([64, 128, 128, 64, 64], net_lr)

        # This ought to decay
        self.epsilon = 0.6

        # Variables for Q learning
        self.q_lr = q_lr
        self.discount_factor = discount_factor
        self.play_history = []
        self.wins = 0
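The comment above notes that epsilon ought to decay. A minimal sketch of one common schedule, multiplicative decay with a floor (the method name and constants are illustrative, not from the original):

    def decay_epsilon(self, rate=0.995, floor=0.05):
        # shrink the exploration rate after each episode,
        # but keep a small floor so some exploration remains
        self.epsilon = max(floor, self.epsilon * rate)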
Example #3
def main():
    annotated = ''
    metadata = ''
    x, y = utils.preprocess(annotated, metadata)
    x = np.array(x)
    nn = neural.NN(learning_rate=.0001,
                   hidden_size=128,
                   input_size=22,
                   output_size=1)
    scores = nn.cross_validate(x, y)
    # this_id = utils.id_gen()
    this_id = utils.simple_id()
    nn.save(this_id + '_weights')
    utils.save_score(this_id + '_stats', scores)
Example #4
def train(epsilon=0.001, test_path=''):

    network = NetworkInfo(is_train=True, tp=test_path)
    NN = nn.NN(
        num_inputs=network.num_inputs,
        num_hidden=network.num_hidden,
        num_outputs=network.num_outputs,
        hidden_layer_weights=network.hidden_layer_weights,
        hidden_layer_bias=network.hidden_layer_bias,
        output_layer_weights=network.output_layer_weights,
        output_layer_bias=network.output_layer_bias,
        name_path=network.training_name,
    )
    total_error = network.total_error
    count = 0
    while float(total_error) > float(epsilon):
        try:
            training_inputs, training_outputs = random.choice(
                network.training_sets)
            NN.train(training_inputs, training_outputs)
            outputs = NN.feed_forward(training_inputs)
            for i in range(len(outputs)):
                outputs[i] = round(outputs[i])

            if count == 1000:
                print(outputs, training_outputs)
                total_error = NN.calculate_total_error(network.training_sets)
                print('error = ', total_error)
                network_data = NN.inspect(network.training_sets)
                with open('network.json', 'w') as outfile:
                    json.dump(network_data, outfile)
                count = 0
            else:
                count += 1
        except Exception as e:
            print(e)
            network_data = NN.inspect(network.training_sets)
            with open('network.json', 'w') as outfile:
                json.dump(network_data, outfile)

    network_data = NN.inspect(network.training_sets)
    with open('network.json', 'w') as outfile:
        json.dump(network_data, outfile)
    print(
        json.dumps(network_data,
                   sort_keys=True,
                   indent=4,
                   separators=(',', ': ')))
    print(NN.feed_forward(network.training_sets[0][0]))
    print('Total error: ', total_error)
Example #5
 def fit(self,
         rand_steps=3,
         solver="l-bfgs-b",
         maxiter=500,
         maxfun=15000,
         tol=1e-4,
         iprint=0,
         exact=True,
         jac=True,
         rand_init=True):
     tik = time()
     #--optimization---
     self.arma_comp = arma.ARMA(self.train, (self.ar_ord, self.ma_ord),
                                0).fit(rand_steps=rand_steps,
                                       solver=solver,
                                       maxiter=maxiter,
                                       maxfun=maxfun,
                                       tol=tol,
                                       iprint=iprint,
                                       exact=exact,
                                       jac=jac,
                                       rand_init=rand_init)
     self.nn_comp = nn.NN(self.arma_comp.shocks,
                          (self.nn_in_size, 0, self.nn_hid_size),
                          0).fit(rand_steps=rand_steps,
                                 solver=solver,
                                 maxiter=maxiter,
                                 maxfun=maxfun,
                                 tol=tol,
                                 iprint=iprint,
                                 exact=exact,
                                 jac=jac,
                                 rand_init=rand_init)
     #------------
     print("fit-time:{}".format(time() - tik))
     self.W_arma = self.arma_comp.W_arma
     self.W1, self.W2 = self.nn_comp.W1, self.nn_comp.W2
     self.start_shocks = self.arma_comp.start_shocks
     self.pred = self.arma_comp.pred + self.nn_comp.pred
     self.shocks = self.nn_comp.shocks
     self.std_dev = np.sqrt(
         np.sum(np.square(self.shocks - np.mean(self.shocks))) /
         (self.shocks.shape[0] - 1))
     self.loglik = -0.5 * (self.train.shape[0] - self.ar_ord) * np.log(
         2 * np.pi * self.std_dev**2) - 0.5 * np.sum(
             np.square(self.shocks[self.ar_ord:])) / self.std_dev**2
     self.aic = -2 / self.train.shape[0] * (self.loglik -
                                            self.num_of_params)
     self.mse = np.sum(np.square(
         self.shocks[self.ar_ord:])) / self.train.shape[0]
     self.rmse = np.sqrt(self.mse)
     return self
Example #6
    def __init__(self, env, agent_index, lr):
        ### I DON'T KNOW WHAT THE OBSERVATION SPACE IS; I CAN'T RUN THIS -DIEGO
        self.id = agent_index
        self.env = env
        self.critic = None
        actions = env.action_space[self.id]  # need action space for this agent
        #### I PUT THIS HERE BECAUSE MICHAEL HAD IT; AGAIN I CAN'T VISUALIZE WHAT'S GOING ON -DIEGO
        if isinstance(actions, multiagent.multi_discrete.MultiDiscrete):
            self.action_space = actions.shape
        else:
            self.action_space = actions.n
        self.observation_space = env.observation_space[self.id].shape[0]

        self.net = nn.NN(self.observation_space, self.action_space)
        self.optimizer = optim.Adam(self.net.parameters(), lr=lr)
Example #7
 def _init_value_network(self, input_dims, output_dims, minibatch_size=32):
     """
     A subclass may override this if a different sort
     of network is desired.
     """
     scale_factor = 2
     layer1 = layers.FlatInputLayer(
         minibatch_size, input_dims,
         np.asarray(self.observation_ranges, dtype='float32'), scale_factor)
     layer2 = layers.DenseLayer(layer1, 15, 0.1, 0, layers.tanh)
     layer3 = layers.DenseLayer(layer2, output_dims, 0.1, 0,
                                layers.identity)
     layer4 = layers.OutputLayer(layer3)
     return nn.NN([layer1, layer2, layer3, layer4],
                  batch_size=minibatch_size,
                  learning_rate=self.value_learning_rate)
Example #8
def create_NN():
	#pick number of layers
	num_layers = random.randint(1, 9)

	#pick neurons in each layer
	neurons = list()
	for i in range(num_layers - 1):
		neurons.append(random.randint(80, 1000))

	#pick activation function
	actf_id = random.randint(0, 9)

	#pick random optimizer
	optim_id = random.randint(0, 7)
	
	#create neural network and return
	return nn.NN(num_layers, neurons, actf_id, optim_id)
Example #9
class TestNN:
    nn_1 = nn.NN([3, 4, 5])

    def test_nn_layers(self):
        assert self.nn_1.weights[0].shape == (3, 4)
        assert self.nn_1.weights[1].shape == (4, 5)

    def test_invalid_nn_layers(self):
        with pytest.raises(ValueError):
            nn.NN([])
        with pytest.raises(ValueError):
            nn.NN([1])

    def test_feed_forward(self):
        x = np.array([[1, 1, 1], [2, 2, 2]])
        output = self.nn_1.forward(x)
        assert output.shape == (2, 5)
Example #10
    def __init__(self, state, game_engine):
        # Set the state and game engine.
        self.state = state
        self.game = game_engine

        # Create a features object that will be used to compute the current features
        # of the state that Pacman cares about.
        self.features = features_nn.Features(state, game_engine)
        self.feature_dict = None
        self.prev_features = {}

        # Load the training data from file.
        self.training_data = {}
        self.load_training_data()

        self.nets = {}
        self.buffer = {}
        for dir in DIRECTIONS:
            #self.nets[dir]  = nn.NN( self.nndata['INUM'], self.nndata['HNUM'], self.nndata['ONUM'] )
            self.nets[dir] = nn.NN(self.nndata['nc_list'],
                                   self.nndata['af_list'])
            self.nets[dir].reconstruct(self.nndata[dir])
            self.buffer[dir] = []

        # TODO: make a design decision about this stuff
        self.NUM_BITS = self.nndata['nc_list'][-1]
        self.HIGH = 10000
        self.LOW = -10000

        # Initialize other state that is used by the learning algorithm.
        self.cur_qvals = {}
        self.decision_count = 0
        self.prev_action = None
        self.prev_qval = None
        self.call_counter = 0

        # Initialize attributes for tracking results.
        self.results_mode = self.game.manager.config_options['results_mode']
        self.results_games = self.game.manager.config_options['results_games']
        self.games_count = 0
        self.average_score = 0.0
        self.average_level = 0.0
Example #11
def test_mnist(
    corruption_level=0.0,
    noise_level=0.2,
    learning_rate=0.2,
    inertia_rate=0.0,
    nh=0.1,
    epochs=40000,
    verbose=False,
):
    # load data
    X, y, target_names = load_data(standardize=True)

    # get classifier
    clf = nn.NN(ni=X.shape[1],
                nh=int(nh * X.shape[1]),
                no=len(target_names),
                learning_rate=learning_rate,
                inertia_rate=inertia_rate,
                corruption_level=corruption_level,
                epochs=epochs)

    # cross validation
    skf = StratifiedKFold(y, n_folds=3)
    scores = np.zeros(len(skf))
    for i, (train_index, test_index) in enumerate(skf):
        # train the model
        clf.fit(X[train_index], y[train_index])
        # add noise to the x
        X_corrupted = X[test_index].copy()
        p = np.random.binomial(n=1,
                               p=1 - noise_level,
                               size=X[test_index].shape)
        X_corrupted[p == 0] = np.random.random(X_corrupted.shape)[p == 0]
        # get score
        score = clf.score(X_corrupted, y[test_index])
        scores[i] = score

    # stdout of the score
    if verbose is True:
        print(scores)

    return scores.mean(), clf
Example #12
def test_cifar(corruption_level=0.0, epochs=10000, verbose=False):
    # load train data
    cifar = load_cifar()
    X = cifar.data
    y = cifar.target
    target_names = np.unique(y)

    # standardize
    X = X.astype(np.float64)
    X /= X.max()

    if verbose is True:
        print("Layer size: first: {0}, second: {1}, final: {2}".format(
            X.shape[1], 100, len(target_names)))
    clf = nn.NN(ni=X.shape[1],
                nh=100,
                no=len(target_names),
                corruption_level=corruption_level)

    X_train, X_test, y_train, y_test = train_test_split(X, y)

    # convert train data to 1-of-k expression
    label_train = LabelBinarizer().fit_transform(y_train)
    label_test = LabelBinarizer().fit_transform(y_test)

    clf.fit(X_train, label_train, epochs=epochs)

    y_pred = np.zeros(len(X_test))
    for i, xt in enumerate(X_test):
        o = clf.predict(xt)
        y_pred[i] = np.argmax(o)
    # print(y_pred)

    score = accuracy_score(y_true=y_test, y_pred=y_pred)
    if verbose is True:
        print(classification_report(y_true=y_test, y_pred=y_pred))
        print(confusion_matrix(y_true=y_test, y_pred=y_pred))
        print(score)

    return score
Example #13
def breed(m1, m2):
	#choose keys to determine which parent to take parameter from
	key1 = random.randint(0, 1)
	key2 = random.randint(0, 1)
	key3 = random.randint(0, 1)

	if(key1 == 0):
		num_layers = m1.num_layers
		neurons = m1.neurons
	else:
		num_layers = m2.num_layers
		neurons = m2.neurons
	if(key2 == 0):
		activator_id = m1.activator_id
	else:
		activator_id = m2.activator_id
	if(key3 == 0):
		optimizer_id = m1.optimizer_id
	else:
		optimizer_id = m2.optimizer_id

	return nn.NN(num_layers, neurons, activator_id, optimizer_id)
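A hypothetical pairing with create_NN from Example #8; this assumes the nn.NN instances expose num_layers, neurons, activator_id and optimizer_id, as breed requires:

parent1, parent2 = create_NN(), create_NN()
child = breed(parent1, parent2)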
Example #14
def main(filepath=''):
    size = 16, 16

    network = NetworkInfo()
    if network.is_read_file_error:
        return
    NN = nn.NN(
        num_inputs=network.num_inputs,
        num_hidden=network.num_hidden,
        num_outputs=network.num_outputs,
        hidden_layer_weights=network.hidden_layer_weights,
        hidden_layer_bias=network.hidden_layer_bias,
        output_layer_weights=network.output_layer_weights,
        output_layer_bias=network.output_layer_bias,
        name_path=network.training_name,
    )

    img = Image.open(filepath)
    img.show()
    img = img.convert(mode='L')
    img = img.resize(size)
    histogram = get_histogram(img)

    network_outputs = NN.feed_forward(histogram)

    print('result:')
    answer = []
    for indx, output in enumerate(network_outputs):
        print(NN.name_path[indx].title() + ':\t' + str(output))
        if round(output) == 1:
            answer.append(indx)
    if len(answer) == 0:
        index = maxToIndex(network_outputs)
        print(u'Resembles the object named: ' + str(NN.name_path[index]))
    elif len(answer) > 1:
        index = maxOfOutputsToIndex(answer, network_outputs)
        print(u'Most likely this is the object: ' + str(NN.name_path[index]))
    else:
        print(u'Object: ' + str(NN.name_path[answer[0]]))
Example #15
def test_digits():
    # load train data
    digits = load_digits()
    X = digits.data
    y = digits.target
    target_names = digits.target_names

    # standardize
    X /= X.max()

    clf = nn.NN(ni=X.shape[1],
                nh=2 * X.shape[1],
                no=len(target_names),
                corruption_level=0.25)

    X_train, X_test, y_train, y_test = train_test_split(X, y)

    # convert train data to 1-of-k expression
    label_train = LabelBinarizer().fit_transform(y_train)
    label_test = LabelBinarizer().fit_transform(y_test)

    clf.fit(X_train,
            label_train,
            epochs=10000,
            learning_rate=0.4,
            inertia_rate=0.3)

    y_pred = np.zeros(len(X_test))
    for i, xt in enumerate(X_test):
        o = clf.predict(xt)
        y_pred[i] = np.argmax(o)
    # print(y_pred)

    print(classification_report(y_true=y_test, y_pred=y_pred))
    print(accuracy_score(y_true=y_test, y_pred=y_pred))
    print(confusion_matrix(y_true=y_test, y_pred=y_pred))
Example #16
# In[7]:

# fig = plt.figure(figsize=(15,5))

# for i in range(20):
#     ax = fig.add_subplot(4, 5, 1 + i, xticks=[], yticks=[])
#     im = output_image[:,i].reshape(IMAGE_DIMENSION,IMAGE_DIMENSION,3)
# plt.imshow(im)
# plt.show()

# In[11]:

Nn = nn.NN(IMAGE_DIMENSION)

# In[12]:

Nn.initialize_parameters()
J = []

iterations = 2
for step in range(iterations):
    Nn.forward_prop(input_image)
    l2 = Nn.Loss(output_image)
    Nn.backward_prop(input_image, output_image)
    Nn.learning_algorithm(0.1)
    if step % 100 == 0:
        # record and report the loss every 100 steps
        J.append(l2)
        print('step:', step, 'loss:', l2)
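Once the loop has filled J, the loss curve can be inspected with matplotlib (a short sketch; plt is matplotlib.pyplot, as in the commented-out plotting cell above):

import matplotlib.pyplot as plt

plt.plot(J)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.show()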
Example #17
def setup(self):
    print("Setting up")

    ## Setup nn ##
    self.nn = nn.NN([1, 16, 1])
Example #18
import nn
import visualnn
import os

rel_path = './example_weights/2x2classifier.csv'

horizontal1 = [1, 1, 0, 0]
horizontal2 = [0, 0, 1, 1]
vertical1 = [0, 1, 0, 1]
vertical2 = [1, 0, 1, 0]
checkered1 = [0, 1, 1, 0]
checkered2 = [1, 0, 0, 1]

toy = nn.NN(input_size=4,
            output_size=3,
            num_hidden=1,
            hidden_size=6,
            nonlinearity='relu',
            labels=['horizontal', 'vertical', 'checkered'])
toy.init_weights(rel_path)
#toy.save_weights(script_dir + '/example_weights/save_weights_test.csv')

visualize = visualnn.VisualNN(toy)
visualize.draw('Network architecture')

_, classification, scores = toy.predict(horizontal1)
print('scores: %a' % scores)
print('correct: horizontal')
print('prediction: %s' % classification)

visualize.update()
visualize.draw('Horizontal1')
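The same calls can be repeated for the other patterns defined above, e.g.:

_, classification, scores = toy.predict(vertical1)
print('scores: %a' % scores)
print('correct: vertical')
print('prediction: %s' % classification)

visualize.update()
visualize.draw('Vertical1')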
Example #19
import data
import nn

training_data, validation_data, test_data = data.load_data()
ann = nn.NN([784, 30, 20, 40, 10])

# positional arguments are presumably epochs, mini-batch size and learning rate
ann.stochastic_gradient_desc(training_data, 20, 100, 3.0, test_data=test_data)
Example #20
def AICOptimizer(model_type,
                 max_ar_ord,
                 max_ma_ord,
                 max_in_size,
                 max_hid_size,
                 data,
                 old_params_list=None,
                 insurance=200,
                 rand_steps=3,
                 solver="l-bfgs-b",
                 maxiter=500,
                 maxfun=15000,
                 tol=1e-4,
                 iprint=0,
                 exact=True,
                 jac=True,
                 rand_init=True):
    params_list = get_valid_params(max_ar_ord, max_ma_ord, max_in_size,
                                   max_hid_size, model_type, old_params_list)
    aic_min, insurance_count, best_model, best_order = np.inf, 0, None, None
    RES = pd.DataFrame(index=["order", "aic"])
    ERRs = []
    for order in params_list:
        try:
            if model_type.lower() == "arma":
                model = arma.ARMA(data, order, 0).fit(rand_steps=rand_steps,
                                                      solver=solver,
                                                      maxiter=maxiter,
                                                      maxfun=maxfun,
                                                      tol=tol,
                                                      iprint=iprint,
                                                      exact=exact,
                                                      jac=jac,
                                                      rand_init=rand_init)
            elif model_type.lower() == "nn":
                model = nn.NN(data, order, 0).fit(rand_steps=rand_steps,
                                                  solver=solver,
                                                  maxiter=maxiter,
                                                  maxfun=maxfun,
                                                  tol=tol,
                                                  iprint=iprint,
                                                  exact=exact,
                                                  jac=jac,
                                                  rand_init=rand_init)
            elif model_type.lower() == "armann":
                model = armann.ARMA_NN(data, order,
                                       0).fit(rand_steps=rand_steps,
                                              solver=solver,
                                              maxiter=maxiter,
                                              maxfun=maxfun,
                                              tol=tol,
                                              iprint=iprint,
                                              exact=exact,
                                              jac=jac,
                                              rand_init=rand_init)
            elif model_type.lower() == "armann_zhang":
                model = armann_zhang.ARMA_NN_Zhang(data, order, 0).fit(
                    rand_steps=rand_steps,
                    solver=solver,
                    maxiter=maxiter,
                    maxfun=maxfun,
                    tol=tol,
                    iprint=iprint,
                    exact=exact,
                    jac=jac,
                    rand_init=rand_init)
            else:
                print("Invalid model type")
                return None
        except Exception:
            print("||ERROR|| Error on order:{}".format(order))
            ERRs.append(order)
            continue
        if model.aic < aic_min:
            aic_min = model.aic
            best_model = model
            best_order = order
        RES = RES.append({
            "order": model.order,
            "aic": model.aic
        },
                         ignore_index=True)
        print("||New Model Fit|| model order:{}, model aic:{}".format(
            model.order, model.aic))
        insurance_count += 1
        if insurance_count == insurance:
            RES.to_excel("models_aic.xlsx")
            insurance_count = 0
    return best_model, best_order, aic_min, RES, ERRs
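A hypothetical invocation, assuming series is a one-dimensional numpy array of observations (the search bounds are illustrative):

best_model, best_order, aic_min, res, errs = AICOptimizer(
    'armann', max_ar_ord=3, max_ma_ord=3, max_in_size=4,
    max_hid_size=4, data=series)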
Example #21
 def __init__(self, tagger, arc_tagger, model_path):
     self.tagger = tagger
     self.arc_tagger = arc_tagger
     self.classifier = classifier.Classifier()
     self.nn = nn.NN(model_path)  # TODO: Create arc tagger
Example #22
def run_pendulum(network, tf_ep, pendulum_length):
    dataset = 'data/single_action_2_pendulum_data_L%s.npz' % pendulum_length
    g = 10.0  # default gravity value in openAI
    length = float(pendulum_length)
    # Data size on the solution u
    N_u = 1000
    # Collocation points size, where we’ll check for f = 0
    N_f = 1500
    # DeepNN: 1-sized input [t], 8 hidden layers of width 80, 1-sized output [u]
    layers = [1, 80, 80, 80, 80, 80, 80, 80, 80, 1]
    # Setting up the TF SGD-based optimizer (set tf_epochs=0 to cancel it)
    tf_epochs = int(tf_ep)
    tf_optimizer = tf.keras.optimizers.Adam(learning_rate=0.007, epsilon=1e-1)
    # Setting up the quasi-newton L-BFGS optimizer (set nt_epochs=0 to cancel it)

    # Creating the model and training
    X_f, Exact_u, X_u_train, u_train, lb, ub = prep_data.prep_data(dataset,
                                                                   N_u,
                                                                   N_f,
                                                                   noise=0.01)
    plt.scatter(X_u_train, u_train, marker='.')
    plt.show()
    logger = Logger.Logger(frequency=10)

    # Train with physics informed network
    if network == 'pinn':
        pinns = pinn.PhysicsInformedNN(layers, tf_optimizer, logger, X_u_train,
                                       ub, lb, g, length)

        def error():
            u_pred, _ = pinns.predict(X_f)
            return np.linalg.norm(Exact_u - u_pred, 2) / np.linalg.norm(
                Exact_u, 2)

        logger.set_error_fn(error)
        pinns.fit(X_u_train, u_train, tf_epochs)
        u_pred, f_pred = pinns.predict(X_f)
        plt.scatter(X_f, u_pred, marker='.', c='r')
        plt.xlabel("Time (s)")
        plt.ylabel("Theta")
        plt.title("Predicted Data from Physics Informed NeuralenNetwork")
        plt.savefig("plots/PINN_Predicted_Data.png")
    # Train without physics
    else:
        nns = nn.NN(layers, tf_optimizer, logger, X_u_train, ub, lb, g, length)

        def error():
            u_pred, _ = nns.predict(X_f)
            return np.linalg.norm(Exact_u - u_pred, 2) / np.linalg.norm(
                Exact_u, 2)

        logger.set_error_fn(error)
        nns.fit(X_u_train, u_train, tf_epochs)
        u_pred, f_pred = nns.predict(X_f)
        plt.scatter(X_f, u_pred, marker='.', c='r')
        plt.xlabel("Time (s)")
        plt.ylabel("Theta")
        plt.title("Predicted Data from Physics Uninformed Neural Network")
        plt.savefig("plots/NN_Predicted_Data.png")
        # plt.show()
Example #23
 def __init__(self, gamma, net_lr=0.01):
     self.neur_net = nn.NN([64, 128, 128, 64, 64], net_lr)
     self.explore_rate = 0.6
     self.gamma = gamma
     self.game_list = []
     self.wins = 0
Example #24
import numpy as np

import nn
import visualnn

N = 100  # number of points per class
D = 2  # dimensionality
K = 3  # number of classes
X = np.zeros((N * K, D))  # data matrix (each row = single example)
y = np.zeros(N * K, dtype='uint8')  # class labels
for j in range(K):
    ix = range(N * j, N * (j + 1))
    r = np.linspace(0.0, 1, N)  # radius
    t = np.linspace(j * 4, (j + 1) * 4, N) + np.random.randn(N) * 0.2  # theta
    X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
    y[ix] = j

# some hyperparameters
step_size = 1e-0
reg = 1e-3  # regularization strength

neuralnet = nn.NN(input_size=D, output_size=K, num_hidden=1, hidden_size=100)
neuralnet.print_meta()

visualize = visualnn.VisualNN(neuralnet)
visualize.save('traintest/frame0.png', "Epoch 0")

for step in range(25):
    loss = neuralnet.train(X,
                           y,
                           step_size=step_size,
                           reg_strength=reg,
                           epochs=200,
                           compute_loss_every=100)
    visualize.update()
    visualize.save('traintest/frame%d.png' % (step + 1),
                   "Epoch %d, Loss = %f" % ((step + 1) * 200, loss))
Example #25
import matplotlib.pyplot as plt
import nn
import utils_topix as utils


if __name__ == '__main__':
    
    net_blocks = {'n_inputs': 1, 
                  'layers': [ 
                          {'type': 'dense', 'activation': 'leaky_relu', 'shape': (None, 30)},                    
                          {'type': 'dense', 'activation': 'leaky_relu', 'shape': (None, 1)}
                          ]
                  }
    
    # create the net    
    net = nn.NN(net_blocks)
    
    # initialize the parameters
    net.init_parameters(['uniform', -.1e-1, 2e-1])

    # create the batches from topix dataset
    X_train, Y_train, X_valid, Y_valid, X_test, Y_test = utils.generate_batches(
                                                              filename='data/test_lag.csv', 
                                                              window=net.n_inputs, mode='validation', 
                                                              non_train_percentage=.3,
                                                              val_rel_percentage=.5,
                                                              normalize=True,
                                                              time_difference=True)       

    epochs_train = 5
       
Example #26
        binarizer = LabelBinarizer()
        Y = binarizer.fit_transform(y)

        y = y.reshape(len(y), 1)
        ytest = ytest.reshape(len(ytest), 1)

        image_pixels = X.shape[1]
        k = len(np.unique(y))
        alpha = 0.01
        epochs = 20
        hidden_layer_size = [10, 100, 250, 500, 750]
        test_errors = []
        train_errors = []

        for h in hidden_layer_size:
            model = nn.NN(no_of_in_nodes=image_pixels, no_of_out_nodes=k, no_of_hidden_nodes=h, learning_rate=alpha,
                          bias=None)
            weights = model.fit(X, Y, epochs=epochs, intermediate_results=True)
            for i in range(epochs):
                print("epoch: ", i)
                model.wih = weights[i][0]
                model.who = weights[i][1]
                corrects, wrongs = model.evaluate(X, y)
                train_error = 1 - corrects / (corrects + wrongs)
                print("train error: ", train_error)
                corrects, wrongs = model.evaluate(Xtest, ytest)
                test_error = 1 - corrects / (corrects + wrongs)
                print("test error: ", test_error)
            test_errors = np.append(test_errors, test_error)
            train_errors = np.append(train_errors, train_error)
        plt.plot(hidden_layer_size, test_errors, label="validation error")
        plt.plot(hidden_layer_size, train_errors, label="training error")
Example #27
import numpy as np

import nn
import visualnn
from matplotlib import pyplot as plt


N = 100  # number of points per class
D = 2  # dimensionality
K = 3  # number of classes
X = np.zeros((N * K, D))  # data matrix (each row = single example)
y = np.zeros(N * K, dtype='uint8')  # class labels
for j in range(K):
    ix = range(N * j, N * (j + 1))
    r = np.linspace(0.0, 1, N)  # radius
    t = np.linspace(j * 4, (j + 1) * 4, N) + np.random.randn(N) * 0.2  # theta
    X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
    y[ix] = j

# some hyperparameters
step_size = 1e-0
reg = 1e-3  # regularization strength

neuralnet = nn.NN(input_size=D, output_size=K, num_hidden=2, hidden_size=40)
neuralnet.print_meta()

visualize = visualnn.VisualNN(neuralnet)
visualize.save('traintest2/frame0.png', "Epoch 0")

for step in range(50):
    loss = neuralnet.train(X,
                           y,
                           step_size=step_size,
                           reg_strength=reg,
                           epochs=50,
                           compute_loss_every=10)
    visualize.update()
    visualize.save('traintest2/frame%d.png' % (step + 1),
                   "Epoch %d, Loss = %f" % ((step + 1) * 50, loss))
Example #28
def feature_test_mnist(verbose=True):
    print("... loading date")
    # load train data
    mnist = fetch_mldata('MNIST original')
    X_origin = mnist.data
    y = mnist.target
    target_names = np.unique(y)
    # standardize
    X_origin = X_origin.astype(np.float64)
    X_origin /= X_origin.max()
    print("--- done")

    print("... encoding with denoising auto-encoder")
    # get feature & create input
    ae = AutoEncoder(X=X_origin,
                     hidden_size=22 * 22,
                     activation_function=T.nnet.sigmoid,
                     output_function=T.nnet.sigmoid)
    ae.train(n_epochs=5, mini_batch_size=20)
    X = ae.get_hidden(data=X_origin)[0]
    print("--- done")

    # get classifier
    clf = nn.NN(ni=X.shape[1],
                nh=int(0.16 * X.shape[1]),
                no=len(target_names),
                learning_rate=0.3,
                inertia_rate=0.12,
                corruption_level=0.0,
                epochs=150000)

    # cross validation
    skf = StratifiedKFold(y, n_folds=3)
    scores = np.zeros(len(skf))
    for i, (train_index, test_index) in enumerate(skf):
        # train the model
        clf.fit(X[train_index], y[train_index])
        # get score
        score = clf.score(X[test_index], y[test_index])
        scores[i] = score

    # stdout of the score
    if verbose is True:
        print(scores)

    print("... plotting the autoencoder hidden layer")
    # get tiled image
    p = np.random.randint(0, len(X), 400)
    tile = tile_raster_images(X[p], (22, 22), (20, 20),
                              scale_rows_to_unit_interval=True,
                              output_pixel_vals=True,
                              tile_spacing=(1, 1))
    # save tiled data's image
    plt.axis('off')
    plt.title('MNIST dataset')
    plt.imshow(tile, cmap=plt.cm.gray_r)
    plt.savefig('../output/tiled_autoencoder_hidden_mnist.png')
    print("--- done")

    print("... saving the results")
    data = {
        'scores': scores,
        'hidden layer': X,
    }
    with gzip.open('../output/feature_test_mnist.pkl.gz', 'wb') as f:
        cPickle.dump(data, f)
    print("--- done")
Example #29
toutputs0 = np.array([[1], [1], [1], [0]])
toutputs1 = np.array([[0], [0], [0], [1]])

try:
    with open(wfn0, "rb") as f:
        weights0 = np.load(f)
except Exception:
    weights0 = np.array([[.1], [.2]])

try:
    with open(wfn1, "rb") as f:
        weights1 = np.load(f)
except Exception:
    weights1 = np.array([[.1], [.2]])

net0 = nut.NN(tinputs, toutputs0, weights0, wfn0)
net1 = nut.NN(tinputs, toutputs1, weights1, wfn1)

for a in count(0):
    net0.train()
    net1.train()

    in1 = np.array([1, 1])
    in2 = np.array([1, 0])
    in3 = np.array([0, 1])
    in4 = np.array([0, 0])

    out1 = np.array([net1.run(in1), net0.run(in1)])
    out2 = np.array([net1.run(in2), net0.run(in2)])
    out3 = np.array([net1.run(in3), net0.run(in3)])
    out4 = np.array([net1.run(in4), net0.run(in4)])
Example #30
    add_and_pop(sids_train[1], sids[i], N_test // 2)
    add_and_pop(sids_train[0], sids[i], N // 2)
    add_and_pop(sids_val[1], sids[i], N_test // 2)
    add_and_pop(sids_val[0], sids[i], N // 2)
y_train, y_val = [[], []], [[], []]
y_train[0] = np.array([assays[sid] for sid in sids_train[0]], dtype=np.int32)
y_train[1] = np.array([assays[sid] for sid in sids_train[1]], dtype=np.int32)
y_val[0] = np.array([assays[sid] for sid in sids_val[0]], dtype=np.int32)
y_val[1] = np.array([assays[sid] for sid in sids_val[1]], dtype=np.int32)
print('done.')

# Setup NN
net = nn.NN(d=d,
            batchsize=batchsize,
            n_train_epoch=n_train_epoch,
            n_val_epoch=n_val_epoch,
            n_units=m)

# Setup NFP generator
_nfp = fp.nfp(d, f, R)

# Learn
for epoch in six.moves.range(1, n_epoch + 1):
    print('epoch', epoch)

    result = _nfp.update(sids_train, y_train, net, train=True)
    print('train acc = %f' % result)
    result = _nfp.update(sids_val, y_val, net, train=False)
    print('validation acc = %f' % result)