def run_network(filename, n, eta):
    """Train the network using both the default and the large starting
    weights.  Store the results in the file with name ``filename``,
    where they can later be used by ``make_plots``.

    """
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, n, 10], cost=network2.CrossEntropyCost)
    print "Train the network using the default starting weights."
    default_vc, default_va, default_tc, default_ta = net.SGD(
        training_data, 30, 10, eta, lmbda=5.0, evaluation_data=validation_data, monitor_evaluation_accuracy=True
    )
    print "Train the network using the large starting weights."
    net.large_weight_initializer()
    large_vc, large_va, large_tc, large_ta = net.SGD(
        training_data, 30, 10, eta, lmbda=5.0, evaluation_data=validation_data, monitor_evaluation_accuracy=True
    )
    f = open(filename, "w")
    json.dump(
        {
            "default_weight_initialization": [default_vc, default_va, default_tc, default_ta],
            "large_weight_initialization": [large_vc, large_va, large_tc, large_ta],
        },
        f,
    )
    f.close()
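The ``make_plots`` companion referenced in the docstring is not shown here; below is a minimal sketch of what it might look like (hypothetical, assuming matplotlib and that the stored accuracies are raw counts over the 10,000 validation images):

import json

import matplotlib.pyplot as plt


def make_plots(filename):
    # Read back the results dumped by run_network above.
    with open(filename, "r") as f:
        results = json.load(f)
    _, default_va, _, _ = results["default_weight_initialization"]
    _, large_va, _, _ = results["large_weight_initialization"]
    epochs = range(len(default_va))
    # Convert counts over 10,000 validation images to percentages.
    plt.plot(epochs, [a / 100.0 for a in default_va], label="Default initialization")
    plt.plot(epochs, [a / 100.0 for a in large_va], label="Large initialization")
    plt.xlabel("Epoch")
    plt.ylabel("Validation accuracy (%)")
    plt.legend(loc="lower right")
    plt.show()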
def trainnetwork():
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    # load_data_wrapper() yields 784-pixel MNIST inputs, so the input layer must be 784
    net = Network([784, 30, 10])
    net.SGD(training_data, 30, 10, 0.5, test_data=test_data)
    net.save("network.txt")
Example 3
def train_mnist_worker(params):
    net_id = params.get('net-id', 'nn')
    layers = [784]
    layers.extend([int(i) for i in params.get('layers', [15])])
    layers.append(10)
    net_params                    = {}
    net_params['epochs']          = int(params.get('epochs', 1))
    net_params['mini_batch_size'] = int(params.get('mini-batch-size', 4))
    net_params['eta']             = float(params.get('eta', 0.1))
    net_params['lmbda']           = float(params.get('lmbda', 0.0001))
    net_params['layers']          = layers

    redis.set(redis_key('params', net_id), json.dumps(net_params))
    redis.set(redis_key('status', net_id), 'train_mnist: started')

    net = Network(layers)
    training_data, validation_data, test_data = load_data_wrapper()
    redis.set(redis_key('status', net_id), 'train_mnist: training with mnist data')
    net.SGD(training_data, net_params['epochs'],
                           net_params['mini_batch_size'],
                           net_params['eta'],
                           net_params['lmbda'])

    redis.set(redis_key('data', net_id), net.tostring())
    redis.set(redis_key('status', net_id), 'train_mnist: trained')
    def __init__(self, fileNames=None):
        # Avoid a mutable default argument
        self.fileNames = fileNames if fileNames is not None else []
        self.trainingData, self.valData, self.testData = \
            mnist_loader.load_data_wrapper()
        self.data = [self.trainingData, self.valData, self.testData]

        self.fileNameDataPairs = zip(self.fileNames, self.data)
Example 5
def visualize():
  training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
  # Unzipping gives tuples, but we want arrays of values.
  training_input = [x.transpose()[0] for x in zip(*training_data)[0]]
  test_input = [x.transpose()[0] for x in zip(*test_data)[0]]
  # Get the y values.
  test_target = [y for y in zip(*test_data)[1]]

  # Apply SVD to the training input.
  u, s, v = np.linalg.svd(training_input, full_matrices=False)
  print u.shape
  print s.shape
  print v.shape
  
  print "Generating embeddings..."
  #print v[0]
  print v[0].shape
  embeddings = [np.dot(test_inp, np.transpose(v[:10])) for test_inp in test_input]
  print embeddings[0].shape
  
  # Do dimensionality reduction into 2 dimensions.
  print "Performing dimensionality reduction using t-sne..."
  tsne = TSNE()
  reduced_vecs = tsne.fit_transform(embeddings)
  print reduced_vecs[0]

  # Graph all of the points, where points corresponding to the same digit will have the same color.
  colors = ['r', 'b', 'g', 'c', 'm', 'k', 'y', (.2, .2, .2), (.4, 0, .5), (.8, .2, 0)]
  red_patch = mpatches.Patch(color='red', label='1')
  patches = [mpatches.Patch(color=colors[i], label='%i'% i) for i in range(len(colors))]
  plt.legend(handles=patches)
  for i in range(len(reduced_vecs)):
    plt.plot([reduced_vecs[i][0]], [reduced_vecs[i][1]], 'o', color=colors[test_target[i]])
  plt.show()
def main():
    # Load the data
    full_td, _, _ = mnist_loader.load_data_wrapper()
    td = full_td[:1000]  # Just use the first 1000 items of training data
    epochs = 500  # Number of epochs to train for

    print "\nTwo hidden layers:"
    net = network2.Network([784, 30, 30, 10])
    initial_norms(td, net)
    abbreviated_gradient = [ag[:6] for ag in get_average_gradient(net, td)[:-1]]
    print "Saving the averaged gradient for the top six neurons in each " + "layer.\nWARNING: This will affect the look of the book, so be " + "sure to check the\nrelevant material (early chapter 5)."
    f = open("initial_gradient.json", "w")
    json.dump(abbreviated_gradient, f)
    f.close()
    shutil.copy("initial_gradient.json", "../../js/initial_gradient.json")
    training(td, net, epochs, "norms_during_training_2_layers.json")
    plot_training(epochs, "norms_during_training_2_layers.json", 2)

    print "\nThree hidden layers:"
    net = network2.Network([784, 30, 30, 30, 10])
    initial_norms(td, net)
    training(td, net, epochs, "norms_during_training_3_layers.json")
    plot_training(epochs, "norms_during_training_3_layers.json", 3)

    print "\nFour hidden layers:"
    net = network2.Network([784, 30, 30, 30, 30, 10])
    initial_norms(td, net)
    training(td, net, epochs, "norms_during_training_4_layers.json")
    plot_training(epochs, "norms_during_training_4_layers.json", 4)
Example 7
def convert_test():  
    fulltrain = pd.read_csv('dat/train.csv')
    trainx = fulltrain.drop(['Id','Cover_Type'], axis=1) # Features
    trainy = fulltrain['Cover_Type'] # Target

    newtrain = train_pd_to_nielsen(trainx, trainy, trainy.max())

    print np.asarray(newtrain).shape
    print np.asarray(newtrain)[0][0].shape
    print np.asarray(newtrain)[0][1].shape

    nisttrain, nistvalid, nisttest = mnist_loader.load_data_wrapper(nielsen_path + 'data/mnist.pkl.gz')
    print np.asarray(nisttrain).shape
    print np.asarray(nisttrain)[0][0].shape
    print np.asarray(nisttrain)[0][1].shape

    print '-'*50
    newtest = test_pd_to_nielsen(trainx, trainy)

    print np.asarray(newtest).shape
    print np.asarray(newtest)[0][0].shape
    print np.asarray(newtest)[0][1].shape

    print np.asarray(nistvalid).shape
    print np.asarray(nistvalid)[0][0].shape
    print np.asarray(nistvalid)[0][1].shape
Example 8
def run():
    np.random.seed(0)
    data = load_data_wrapper()
    initial_learning_rate = 0.2
    network = Network(
        [784, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 10],
        activations_function=ReLU(),
        cost=CrossEntropy(),
        stopping_criteria=NEpochs(50),  # LearningRateDecreaseLimit(
        #     initial_learning_rate=initial_learning_rate,
        #     limit=1/2
        # ),
        learning_rate=FixedLearningRate(0.01),  # HalfLRIfNoDecreaseInNEpochs(
        #     monitor_parameter='validation_cost',
        #     max_epochs=1,
        #     initial_learning_rate=initial_learning_rate
        # ),
        update_algorithm=Momentum(momentum=0.5, base_algorithm=L2UpdateAlgorithm(lmbda=5)),
        weight_initializer=initialize_input_dim_normalized_weights,
    )
    t0 = datetime.utcnow()
    network.sgd(training_data=data[0], validation_data=data[1], test_data=data[2], mini_batch_size=12)
    print("Total time fast {}".format(datetime.utcnow() - t0))
    plot_stats(network)
    return network.log
def main():
    with open('myNeural.txt', 'rb') as f:  # binary mode for pickle
        net = pickle.load(f)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    # Convert to the data structures scikit-learn expects
    X_train = [np.reshape(x, (1, -1))[0] for (x, y) in training_data]
    y_train = [np.argmax(np.reshape(y, (1, -1))[0]) for (x, y) in training_data]

    # Fit estimators
    ESTIMATORS = {
        # KNN
        "K-nn": neighbors.KNeighborsClassifier().fit(X_train, y_train),
        # Naive Bayes
        "naive-bayes": BernoulliNB().fit(X_train, y_train)
        # Decision tree
        # Clustering
    }

    for i in test_data:
        print '=================================='
        testdata = i[0]
        print 'Correct label: %d' % i[1]
        # Check with the neural network
        result = np.argmax(net.feedforward(testdata))
        print 'Neural network prediction:', result
        for name, estimator in ESTIMATORS.items():
            print 'Prediction with %s: %s' % (name, estimator.predict(np.reshape(testdata, (1, -1))))
def run_network(filename, num_epochs, training_set_size=1000, lmbda=0.0):
    """Train the network for ``num_epochs`` on ``training_set_size``
    images, and store the results in ``filename``.  Those results can
    later be used by ``make_plots``.  Note that the results are stored
    to disk in large part because it's convenient not to have to
    ``run_network`` each time we want to make a plot (it's slow).

    """
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    #net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
    net = network2.Network([784, 30, 10], cost=network2.LogLikelihoodCost)
    net.large_weight_initializer()
    test_cost, test_accuracy, training_cost, training_accuracy, output_activations \
        = net.SGD(training_data[:training_set_size], num_epochs, 10, 0.5,
                  evaluation_data=test_data, lmbda = lmbda,
                  monitor_evaluation_cost=True, 
                  monitor_evaluation_accuracy=True, 
                  monitor_training_cost=True, 
                  monitor_training_accuracy=True)
    f = open(filename, "w")
    '''
    print "test_cost",type(test_cost),test_cost
    print
    print "test_accuracy",type(test_accuracy), test_accuracy
    print
    print "training_cost",type(training_cost), training_cost
    print 
    print "training_accuracy",type(training_accuracy), training_cost
    print
    '''
    json.dump([test_cost, test_accuracy, training_cost, training_accuracy, output_activations], f)
    f.close()
def run_network(filename):
    """Train the network using both the default and the large starting
    weights.  Store the results in the file with name ``filename``,
    where they can later be used by ``make_plots``.

    """
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost())
    print "Train the network using the default starting weights."
    default_vc, default_va, default_tc, default_ta \
        = net.SGD(training_data, 30, 10, 0.01,
                  evaluation_data=validation_data, lmbda = 0.001,
                  monitor_evaluation_accuracy=True)
    print "Train the network using the large starting weights."
    net.large_weight_initializer()
    large_vc, large_va, large_tc, large_ta \
        = net.SGD(training_data, 30, 10, 0.01,
                  evaluation_data=validation_data, lmbda = 0.001,
                  monitor_evaluation_accuracy=True)
    f = open(filename, "w")
    json.dump({"default_weight_initialization":
               [default_vc, default_va, default_tc, default_ta],
               "large_weight_initialization":
               [large_vc, large_va, large_tc, large_ta]}, 
              f)
    f.close()
def run_networks():
    """Train networks using three different values for the learning rate,
    and store the cost curves in the file ``multiple_eta.json``, where
    they can later be used by ``make_plot``.

    """
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    results = []
    for eta in LEARNING_RATES:
        print "\nTrain a network using eta = " + str(eta)
        net = network2.Network([784, 30, 10])
        results.append(
            net.SGD(
                training_data,
                NUM_EPOCHS,
                10,
                eta,
                lmbda=5.0,
                evaluation_data=validation_data,
                monitor_training_cost=True,
            )
        )
    f = open("multiple_eta.json", "w")
    json.dump(results, f)
    f.close()
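The ``make_plot`` companion mentioned in the docstring is not shown; here is a minimal sketch under the same assumptions (matplotlib available, plus the LEARNING_RATES and NUM_EPOCHS constants the function above already relies on):

import json

import matplotlib.pyplot as plt


def make_plot():
    # Each entry of results holds (vc, va, tc, ta) for one learning rate;
    # index 2 is the per-epoch training cost monitored above.
    with open("multiple_eta.json", "r") as f:
        results = json.load(f)
    for eta, result in zip(LEARNING_RATES, results):
        training_cost = result[2]
        plt.plot(range(NUM_EPOCHS), training_cost, label="eta = " + str(eta))
    plt.xlabel("Epoch")
    plt.ylabel("Training cost")
    plt.legend()
    plt.show()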
Example 13
def load_data():
    train, val, test = mnist_loader.load_data_wrapper()
    train = list(train)
    val = list(val)
    test = list(test)

    return train, val, test
Example 14
def learn(request):
    tr_d, v_d, t_d = mnist_loader.load_data_wrapper()
    net.SGD(tr_d, 30, 10, 3.0)
    print net.evaluate(t_d)
    return HttpResponseRedirect('/')
Example 15
    def __init__(self, sizes):
        print("Starting...")
        start = time.time()
        training_data, validation_data, test_data = \
            mnist_loader.load_data_wrapper()
        self.net = network.Network(sizes)
        self.net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
        end = time.time()
        print("Done!\nTime Taken: " + str(end - start))
Example 16
def recognize(img_name):
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    img_list = loadImage(img_name)
    net = network.Network([784, 100, 10])  # input, hidden, output
    # net.SGD(training_data, 5, 10, 2.0, test_data=test_data)
    f = open('100hl.bin', 'rb')
    net.biases = np.load(f)
    net.weights = np.load(f)  # load trained weights and biases
    f.close()  # close the file after reading weights and biases
    # classify the image
    return net.feedforward(img_list)
def main():
    # Time to process the dataset
    start_process = timeit.default_timer()

    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

    net = NeuralNetwork([784, 30, 10])
    net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

    time_neural = timeit.default_timer() - start_process

    print "The total time to run the neural network was: %d seconds" %(int(time_neural))
Example 18
def run_networks():
    # Make results more easily reproducible    
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    # instantiate network
    net = nnetwork.Network([INPUT_NEURONS,HIDDEN_NEURONS,OUTPUT_NEURONS])
    # run SGD
    results = net.gradientDescent(training_data, BATCH_SIZE, eta, NUM_EPOCHS,
                    test_data=test_data)
    f = open("learning.json", "w")
    json.dump(results, f)
    f.close()
Example 19
def test_cross_entropy_cost():
	training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
	net2 = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
	net2.large_weight_initializer()
	net2.SGD(training_data, 30, 10, 0.5, evaluation_data=test_data, monitor_evaluation_accuracy=True)


# import network
# from mnist_load import MnistLoad
# loader = MnistLoad('../data-new/')
# train_data, test_data = loader.loadAsNumpyData()
# net = network.Network([784, 30, 10])
# net.SGD(train_data, 30, 10, 3.0, test_data = test_data)
def main():
    filename = 'test'
    # load data
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    # example
    epochs = 5
    net = network2.Network([784, 10], cost=network2.CrossEntropyCost)
    vc, va, tc, ta = net.SGD(
        training_data=training_data, epochs=epochs, mini_batch_size=100,
        eta=0.1, lmbda=0.1, reg=2,
        evaluation_data=validation_data,
        monitor_evaluation_cost=True,
        monitor_evaluation_accuracy=True,
        monitor_training_cost=True,
        monitor_training_accuracy=True)
Example 21
def run_networks():
    # Make results more easily reproducible    
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = nnetwork1.Network([INPUT_NEURONS,HIDDEN_NEURONS,OUTPUT_NEURONS], mu)
    results = []
    for eta in LEARNING_RATES:
        print "\nTrain a network using eta = "+str(eta)
        results.append(net.gradientDescent(training_data, batch_size, eta, NUM_EPOCHS,
                    test_data=test_data))
    f = open("eta_graph1.json", "w")
    json.dump(results, f)
    f.close()
Example 22
def run_networks():
    # Make results more easily reproducible    
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = nnetwork2.Network([784, 30, 10], 0.5)
    results = []
    for eta in LEARNING_RATES:
        print "\nTrain a network using eta = "+str(eta)
        results.append(net.gradientDescent(training_data, 10, eta, NUM_EPOCHS,
                    test_data=test_data))
    f = open("eta_graph2.json", "w")
    json.dump(results, f)
    f.close()
Example 23
    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)
        self.trainButton.clicked.connect(self.trainButtonAction)
        self.loadParamButton.clicked.connect(self.loadData)
        self.saveParamButton.clicked.connect(self.saveData)
        self.testDigitButton.clicked.connect(self.testDigit)
        self.showDigitButton.clicked.connect(self.showDigit)
        self.psDigitTest.clicked.connect(self.testPsDigit)
        self.drawButton.clicked.connect(self.drawDigitAndTest)

        self.training_data, self.validation_data, self.test_data = mnist_loader.load_data_wrapper()
        self.net = network.Network([784, 30, 10])
        self.textEdit.append('Network created.')
def main(layers=None):
    # Load the data
    full_td, _, _ = mnist_loader.load_data_wrapper()
    td = full_td[:1000] # Just use the first 1000 items of training data
    epochs = 500 # Number of epochs to train for

    if layers is None:
        layers = [784, 30, 30, 10]
    net = network.Network(layers)
    initial_norms(td, net)
    abbreviated_gradient = [
        ag[:6] for ag in get_average_gradient(net, td)[:-1]]
    training(td, net, epochs, "norms_during_training_2_layers.json")
    plot_training(
        epochs, "norms_during_training_2_layers.json", 2)
def run_networks():
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost())
    accuracies = []
    for size in SIZES:
        print "\n\nTraining network with data set size %s" % size
        net.large_weight_initializer()
        num_epochs = 1500000 / size 
        net.SGD(training_data[:size], num_epochs, 10, 0.05, lmbda = 0.001)
        accuracy = net.accuracy(validation_data) / 100.0
        print "Accuracy was %s percent" % accuracy
        accuracies.append(accuracy)
    f = open("more_data.json", "w")
    json.dump(accuracies, f)
    f.close()
def run_network(filename, lmbda=0.0):
    """Train the network, and store the results in ``filename``.  Those
    results can later be used by ``make_plots``.  Note that the
    results are stored to disk in large part because it's convenient
    not to have to ``run_network`` each time we want to make a plot
    (it's slow).

    """
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost())
    test_cost, test_accuracy, training_cost, training_accuracy \
        = net.SGD(training_data[:1000], NUM_EPOCHS, 10, 0.05,
                  evaluation_data=test_data, lmbda = lmbda,
                  monitor_evaluation_cost=True, 
                  monitor_evaluation_accuracy=True, 
                  monitor_training_cost=True, 
                  monitor_training_accuracy=True)
    f = open(filename, "w")
    json.dump([test_cost, test_accuracy, training_cost, training_accuracy], f)
    f.close()
Example 27
def visualize():
  infile = open("trained_autoencoder.pkl")
  auto_enc = pickle.load(infile)

  training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
  # Unzipping gives tuples, but we want arrays of values.
  training_input = [x for x in zip(*training_data)[0]]
  test_input = [x for x in zip(*test_data)[0]]
  # Get the y values.
  test_target = [y for y in zip(*test_data)[1]]
  print test_target[0]

  '''
  # Encode all of the MNIST test set using the autoencoder.
  # TODO: get rid of debugging, do all points not just 50
  print "Encoding MNIST using autoencoder..."
  autoencoder_encoded_vecs = [auto_enc.feedforward(test_inp, embed=True).transpose()[0] for test_inp in test_input]
  print len(autoencoder_encoded_vecs)
  # print autoencoder_encoded_vecs[0]
  # print autoencoder_encoded_vecs[0].shape
  '''
  # Do dimensionality reduction into 2 dimensions using t-sne.
  print "Performing dimensionality reduction using t-sne..."
  tsne = sklearn.manifold.TSNE()
  # Try just using tsne on the raw MNIST digits data.
  autoencoder_encoded_vecs = [inp.transpose()[0] for inp in test_input]
  reduced_vecs = tsne.fit_transform(autoencoder_encoded_vecs)    
  print reduced_vecs[0]
  #plt.plot([p[0] for p in reduced_vecs[:30]], [p[1] for p in reduced_vecs[:30]], 'ro')
  

  # Graph all of the points, where points corresponding to the same digit will have the same color.
  colors = ['r', 'b', 'g', 'c', 'm', 'k', 'y', (.2, .2, .2), (.4, 0, .5), (.8, .2, 0)]
  red_patch = mpatches.Patch(color='red', label='1')
  patches = [mpatches.Patch(color=colors[i], label='%i'% i) for i in range(len(colors))]
  plt.legend(handles=patches)
  for i in range(len(reduced_vecs)):
    plt.plot([reduced_vecs[i][0]], [reduced_vecs[i][1]], 'o', color=colors[test_target[i]]) 
  plt.show()
Example 28
def main():
  # Load MNIST
  # training_input = mnist_loader.load_training_input()

  training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
  # Unzipping gives tuples, but we want arrays of values.
  training_input = [x for x in zip(*training_data)[0]]
  test_input = [x for x in zip(*test_data)[0]]
  print type(training_input[0])
  #print(training_input[0][:10])
  #print(autoencoder.add_noise(training_input[0])[:10])

  # Make autoencoder network
  input_size = len(training_input[0])
  hidden_size = 100
  auto_enc = autoencoder.Autoencoder(input_size, hidden_size)
  #auto_enc.sgd(training_input, 5, 100, 3.0)
  auto_enc.sgd(training_input, 5, 100, 3.0, test_input)
  # Save the trained autoencoder to file.
  outfile = open("trained_autoencoder.pkl", "wb")  # binary mode for pickle
  pickle.dump(auto_enc, outfile)
  outfile.close()
  mse_error = auto_enc.test(test_input)
  print "mse_error: ", mse_error
Example 29
def Main():
    #Load MNIST ****************************************************************

    #mnist.pkl.gz(50000) Accuracy 0.96
    #mnist_expanded.pkl.gz(250000) Accuracy 0.97
    fn = "..\\data\\mnist.pkl.gz"  #".datamnist_expanded.pkl.gz"
    lstTrain, lstV, lstT = mnist_loader.load_data_wrapper(fn)
    lstTrain = list(lstTrain)
    lstV = list(lstV)
    lstT = list(lstT)

    # *********************************************************************
    path = "..\\TmpLogs\\"
    if not os.path.isdir(path):
        os.mkdir(path)

    fnNetworkData = "{}{}_NetData".format(path, rn.RvNeuralNetwork.__name__)
    fnSaved = ""

    # Hyper parameters -------------------------------------------
    loop = 10  # loop effect: 10 and 30 both reach above 0.95
    stepNum = 10  # stepNum effect: 10 -> 0.9, 100 -> 0.5
    learnRate = 0.1  # learnRate and lmbda affect each other
    lmbda = 5.0  # lmbda (regularization) counters overfitting

    # Training ***********************************************************
    # Ask whether to run training
    DoTraining = ri.Ask_YesNo("Execute Training?", "y")
    if DoTraining:
        """
        [784,50,10], 
            loop=10, 0.9695
            loop=100, 0.9725
        """
        # Create RvNeuralNetwork ----------------------------------------------
        inputNeusNum = len(lstTrain[0][0])
        lyr1NeuNum = 50
        lyr2NeuNum = len(lstTrain[0][1])

        lyrsNeus = [inputNeusNum, lyr1NeuNum]
        lyrsNeus = ri.Ask_Add_Array_Int("Input new layer Neurons num.",
                                        lyrsNeus, lyr1NeuNum)
        lyrsNeus.append(lyr2NeuNum)

        #net = rn.RvNeuralNetwork( \
        #   rn.RvNeuralNetwork.LayersNeurons_To_RvNeuralLayers(lyrsNeus))
        #net = rn.RvNeuralNetwork.Class_Create_LayersNeurons(lyrsNeus)
        #net = rn.RvNeuralNetwork(lyrsNeus)  # ([784,50,10])

        cnvLyrSId = 0
        lyrObjs = []

        # Add RvConvolutionLayer--------------------------------
        EnableCovolutionLayer = ri.Ask_YesNo("Add ConvolutionLayer?", "y")
        if EnableCovolutionLayer:
            lyrsNeus, cnvLyrSId, cnvLyr = Add_ConvLayer(
                lyrObjs, lyrsNeus, inputNeusNum, cnvLyrSId)

        # Create Layer Object array -------------------------------
        for iLyr in range(cnvLyrSId, len(lyrsNeus) - 1):
            lyrObjs.append(
                rn.RvNeuralLayer([lyrsNeus[iLyr], lyrsNeus[iLyr + 1]]))
        net = rn.RvNeuralNetwork(lyrObjs)

        # Ask activation method ----------------------------------
        enumActivation = ri.Ask_Enum("Select Activation method.",
                                     nm.EnumActivation,
                                     nm.EnumActivation.afReLU)
        for lyr in net.NeuralLayers:
            lyr.ClassActivation, lyr.ClassCost = \
            nm.Get_ClassActivation(enumActivation)

        net.Motoring_TrainningProcess = rn.Debug

        net.NetEnableDropOut = ri.Ask_YesNo("Execute DropOut?", "n")
        if net.NetEnableDropOut:
            enumDropOut = ri.Ask_Enum("Select DropOut Method.",
                                      nm.EnumDropOutMethod,
                                      drpOut.eoSmallActivation)
            rn.gDropOutRatio = ri.Ask_Input_Float("Input DropOut ratio.",
                                                  rn.gDropOutRatio)
            net.Set_DropOutMethod(enumDropOut, rn.gDropOutRatio)

        monitoring = ri.Ask_YesNo("Watch training process?", "y")
        net.Motoring_TrainningProcess = monitoring

        # Calculate proper hyper parameters ---
        DoEvaluate_ProperParams = ri.Ask_YesNo(
            "Auto-calculate proper hyper parameters?", "n")
        if DoEvaluate_ProperParams:
            loop, stepNum, learnRate, lmbda = rf.Evaluate_BestParam_lmbda(
                net, net.Train, lstTrain[:1000], lstV[:500], loop, stepNum,
                learnRate, lmbda)
            loop, stepNum, learnRate, lmbda = rf.Evaluate_BestParam_learnRate(
                net, net.Train, lstTrain[:1000], lstV[:500], loop, stepNum,
                learnRate, lmbda)
        else:
            loop, stepNum, learnRate, lmbda = rf.Ask_Input_SGD(
                loop, stepNum, learnRate, lmbda)

        print(
            "Hyper parameters: Loop({}), stepNum({}), learnRate({}), lmbda({})\n"
            .format(loop, stepNum, learnRate, lmbda))

        start = time.time()
        # Start network training
        net.Train(lstTrain, loop, stepNum, learnRate, lstV, lmbda)

        dT = time.time() - start

        fnSaved = rf.Save_NetworkDataFile(net, fnNetworkData, loop, stepNum,
                                          learnRate, lmbda, dT)

    # Ask DoPredict----------------------------------------------------
    DoPredict = True
    if DoPredict:
        if (os.path.isfile(fnSaved)):
            fn1 = fnSaved
        else:
            fn1 = ".\\{}_NetData_DontDelete.txt".format(
                rn.RvNeuralNetwork.__name__)
        rn.Debug_Plot = True  #ri.Ask_YesNo("Plot Digits?", "n")
        #    Predict_Digits(net, lstT)
        pltFn.Predict_Digits_FromNetworkFile(fn1, lstT, rn.Debug_Plot)
import mnist_loader
from NeuralNetwork import Network

train_data, validate_data, test_data = mnist_loader.load_data_wrapper()

net = Network(configurationFileName='NetConfig')

print('Done {0} from {1}'.format(net.check(test_data), len(test_data)))
Example 31
import sys
Example 32
    def test_train(self):
        train_data, val_data, test_data = loader.load_data_wrapper()
        sizes = [784, 30, 30, 10]
        model = nn.Network(sizes)
        model.train(train_data, 30, 3.0, 25, test_data)
Example 33
def main():
    dimension_string = sys.argv[1]
    dimensions = list(map(int, dimension_string.strip('[]').split(',')))
    net = NeuralNetwork(dimensions)  # map(int,input.strip('[]').split(','))
    (training_data, validation, test_data) = load_data_wrapper()
    net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
Example 34
import mnist_loader

import tensorflow as tf
import math
import numpy as np

import matplotlib.pyplot as plt



x_training,y_training,x_validation,y_validation,x_test,y_test = mnist_loader.load_data_wrapper()

N_training=len(x_training)
N_validation=len(x_validation)
N_test=len(x_test)

 
N_epochs = 5

learning_rate = 3.0	
batch_size = 10


N1 = 784 #equals N_inputs
N2 = 30
N3 = 30
N4 = 30
N5 = 10

N_in=N1
N_out=N5
Example 35
import mnist_loader,DigitNN

training_data, validation_data, test_data = \
mnist_loader.load_data_wrapper()

net = DigitNN.NeuralNetwork([784, 30, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
def Main():

    # Hyper parameters -------------------------------------------
    loop = 5  # loop effect: 10 and 30 both reach above 0.95
    stepNum = 10  # stepNum effect: 10 -> 0.9, 100 -> 0.5
    learnRate = 0.1  # learnRate and lmbda affect each other
    lmbda = 5.0  # lmbda (regularization) counters overfitting
    dropOutRatio = rn.gDropOutRatio

    # Load net file and continue training --
    fns, fn0s = rfi.Get_FilesInFolder(".\\NetData\\", [".cnn", ".dnn"])
    aId = ri.Ask_SelectItem("Select network file", fn0s, 0)
    fn1 = fns[aId]
    #
    #    print(rfi.ExtractFilePath(fn1))
    #    print(rfi.ExtractFileName(fn1))
    #    print(rfi.ExtractFileExt(fn1))
    #    return

    if (os.path.isfile(fn1)):
        net = rn.RvNeuralNetwork(fn1)
        enumDropOut = net.NetEnumDropOut.value
        dropOutRatio = net.NetDropOutRatio
        #'BestAccuracyRatio' : net.BestAccuracyRatio
        #loop = net.Train_Loop
        learnRate = net.Train_LearnRate
        lmbda = net.Train_Lmbda
    else:
        net = None

    if net is not None:
        #Load MNIST ****************************************************************

        #mnist.pkl.gz(50000) Accuracy 0.96
        #mnist_expanded.pkl.gz(250000) Accuracy 0.97
        fn = "..\\data\\mnist.pkl.gz"  # "..\\data\\mnist_expanded.pkl.gz"
        lstTrain, lstV, lstT = mnist_loader.load_data_wrapper(fn)
        lstTrain = list(lstTrain)
        lstV = list(lstV)
        lstT = list(lstT)

        sYN = "y" if net.NetEnableDropOut else "n"
        enableDropOut = ri.Ask_YesNo("Excute DropOut?", sYN)
        if enableDropOut:
            enumDropOut = ri.Ask_Enum("Select DropOut Method.",
                                      nm.EnumDropOutMethod,
                                      drpOut.eoSmallActivation)
            rn.gDropOutRatio = ri.Ask_Input_Float("Input DropOut ratio.",
                                                  dropOutRatio)

        if enableDropOut:
            net.Set_DropOutMethod(enumDropOut, rn.gDropOutRatio)

        # Auto-calculate proper hyper parameters ---
        loop, stepNum, learnRate, lmbda = rf.Ask_Input_SGD(
            loop, stepNum, learnRate, lmbda)

        print(
            "Hyper parameters: Loop({}), stepNum({}), learnRate({}), lmbda({})\n"
            .format(loop, stepNum, learnRate, lmbda))

        DoKeepTraining = True
        while (DoKeepTraining):
            start = time.time()
            # Start Training
            net.Train(lstTrain,
                      loop,
                      stepNum,
                      learnRate,
                      lstV,
                      lmbda,
                      blInitialWeiBias=False)
            dT = time.time() - start

            if net.BestAccuracyRatio > net.Get_NetworkFileData(fn1):
                rf.Save_NetworkDataFile(net, fn1, loop, stepNum, learnRate,
                                        lmbda, dT)
                print("Save Network file \"{}\"".format(fn1))

            DoKeepTraining = ri.Ask_YesNo("Continue training?", "y")
Example 37
KL_loss = tf.reduce_mean(KL_loss)

total_loss = recon_loss + KL_loss
train_op = tf.train.AdamOptimizer(
    learning_rate=learning_rate).minimize(total_loss)

losses = {
    'recon_loss': recon_loss,
    'total_loss': total_loss,
    'KL_loss': KL_loss,
}

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, './VAE_mnist.ckpt')
    training_data = mnist_loader.load_data_wrapper()
    random.shuffle(training_data)
    a = tf.placeholder(tf.float32, [50000, 784])
    dataset = tf.data.Dataset.from_tensor_slices(a)
    dataset = dataset.prefetch(buffer_size=1000)
    dataset = dataset.batch(batch_size)
    iterator = dataset.make_initializable_iterator()
    next = iterator.get_next()
    # sess.run(tf.global_variables_initializer())
    print("session initialized 1")
    sess.run(iterator.initializer, feed_dict={a: training_data})

    print("iterator initialized ")
    # for i in range(5000):
    #         # print(i)
    #     if i>0 and i % (50000 // batch_size) == 0:
Example 38
#!/usr/bin/env python3.5
import mnist_loader
import network2 as net
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from timeit import default_timer

# read the data
TRAINING_DATA, VALIDATION_DATA, TEST_DATA = mnist_loader.load_data_wrapper()
TRAINING_DATA, VALIDATION_DATA, TEST_DATA = list(TRAINING_DATA), list(VALIDATION_DATA), list(TEST_DATA)

# size of data sets
TRAINING_SIZE, TEST_SIZE = len(TRAINING_DATA), len(TEST_DATA)

# smaller subset for faster tuning
N_VAL_SAMPLES = 1000
SMALL_VALIDATION_DATA = VALIDATION_DATA[:N_VAL_SAMPLES]
SMALL_TRAINING_DATA = TRAINING_DATA[:5000]

# initialize constants
LAYERS = [784, 30, 10]

# training parameters
TRAINING_EPOCHS, TEST_EPOCHS = 30, 100


# initial grid search values
INITIAL_LMBDA = 0  # used for LR search
INITIAL_LEARNING_RATES = np.logspace(-3, 3, 7)
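A sketch of how these constants might drive the initial learning-rate grid search (hypothetical; assumes network2's ``SGD`` signature from the book and a mini-batch size of 10):

best_eta, best_accuracy = None, 0
for eta in INITIAL_LEARNING_RATES:
    candidate = net.Network(LAYERS, cost=net.CrossEntropyCost)
    # monitor_evaluation_accuracy yields per-epoch counts over the small
    # validation set; keep the learning rate with the best peak.
    _, accuracy, _, _ = candidate.SGD(
        SMALL_TRAINING_DATA, TRAINING_EPOCHS, 10, eta,
        lmbda=INITIAL_LMBDA,
        evaluation_data=SMALL_VALIDATION_DATA,
        monitor_evaluation_accuracy=True)
    if max(accuracy) > best_accuracy:
        best_eta, best_accuracy = eta, max(accuracy)
print("best eta: {}, accuracy: {} / {}".format(best_eta, best_accuracy, N_VAL_SAMPLES))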
Example 39
def main():
    """"""
    train, val, test = load_data_wrapper()
    model = Network([784, 60, 10])
    model.SGD(train, 30, 10, 3, val)
    print('Evaluation on test: {0} / {1}'.format(model.evaluate(test), len(test)))
Example 40
"""
mnist_sgd
~~~~~~~~~

A classifier program for recognizing handwritten digits from the MNIST
data set, using a neural network classifier with stochastic gradient descent."""
"""
Let's start by loading in the MNIST data using a little helper program, mnist_loader.py
"""
import mnist_loader
train, val, test = mnist_loader.load_data_wrapper()

# alternative code to read MNIST data
"""
import gzip, pickle
ff = gzip.open('../data/mnist.pkl.gz','rb')
u = pickle._Unpickler( ff )
u.encoding = 'latin1'
train, val, test = u.load()
ff.close()
"""
"""
After loading the MNIST data, we'll set up a Network with 30 hidden neurons 
using the class ``network``
"""
import network
net = network.Network([784, 30, 10])
"""
Finally, we'll use stochastic gradient descent 
to learn from the MNIST training data 
over 30 epochs, 
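# A minimal sketch of that call, assuming the hyperparameters named above
# (30 epochs, mini-batch size 10, learning rate 3.0):
net.SGD(train, 30, 10, 3.0, test_data=test)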
Example 41
import mnist_loader as ml
import network

train_data, val_data, test_data = map(list, ml.load_data_wrapper())

print(len(train_data), len(test_data))
net = network.Network([784, 30, 10])
net.SGD(train_data, 30, 20, 0.1, test_data=test_data)
Example 42
import os
import sys

sys.path.append("D:/Work/neural-networks-and-deep-learning/src")

import mnist_loader
import network2_1
import numpy as np

training_data, validation_data, test_data =\
    map(list, mnist_loader.load_data_wrapper())

# No L1
net = network2_1.Network([784, 10])
net.SGD(training_data[:30000], 30, 10, 3)
print("=" * 20)
print("Accuracy unregularized: {} / {}\n".format(net.accuracy(validation_data),
                                                 len(validation_data)))

# L1 applied
print("=" * 20)
lmbdas = [0.0001, 0.01, 0.1, 0.5, 1, 2, 3, 4, 5, 7, 10]
print("Accuracy with L1: \n")
for lmbda in lmbdas:
    net = network2_1.Network([784, 10])
    net.SGD(training_data[:30000], 30, 10, 3, lmbda=lmbda, L1_ratio=1)
    print("Lambda: {}, accuracy: {} / {}".format(lmbda,
                                                 net.accuracy(validation_data),
                                                 len(validation_data)))
""" REMARKS:
Below is an example of the output when trained
############################
# Module: cs5600_6600_f20_hw03.py
# Your name
# Your A#
############################

# from network import Network
from mnist_loader import load_data_wrapper
from ann import ann
import random
import pickle as cPickle
import pathlib
import numpy as np

# load training, validation, and testing MNIST data
train_d, valid_d, test_d = load_data_wrapper()
input_layer = 784
output_layer = 10


def train_1_hidden_layer_anns(lwr=10,
                              upr=50,
                              eta=0.25,
                              mini_batch_size=10,
                              num_epochs=10):
    assert 100 >= upr > lwr >= 10
    assert lwr % 10 == 0 and upr % 10 == 0  # divisible by 10
    while lwr <= upr:
        print(f"==== Training {input_layer}x{lwr}x{output_layer} ANN ======")
        net = ann([input_layer, lwr, output_layer])
        net.mini_batch_sgd(train_d, num_epochs, mini_batch_size, eta, test_d)
        lwr += 10  # advance to the next hidden-layer size; without this the loop never terminates
            elif outputs[outputIndex] == 0 or outputs[outputIndex] == False:
                boolean = False
            else:
                raise Exception("Unacceptable (non-boolean) output " +
                                str(outputs[xIndex]))

            listOfClauses += Output(exampleID, xIndex, boolean,
                                    valDict).getClauses()

    return strSum([str(clause) for clause in listOfClauses])


output = open("nand.cnf", "w")

#data = [([0,0], [0]), ([0,1], [1]), ([1,0], [1]), ([1,1], [0])]
#output.write(makeSatInstance(data, 6))


def processRound(l):
    for i in range(len(l)):
        l[i] = [[round(x) for x in l[i][0]], l[i][1]]

    return l


data = processRound(mnist_loader.load_data_wrapper()[0][:1])
print len(data)
print "data loaded"
output.write(makeSatInstance(data, 840))
#data = [([0,0], [1]), ([0,1], [0]), ([1,0], [0]), ([1,1], [0])]
#output.write(makeSatInstance(data, 3))
Example 45
trainSize = 1000
testSize = 1000
#array separated by commas for multiple hidden layers
hiddenLay = [100]
#array separated by commas for different conv sizes
'''convs = (10, 8)
#array separated by commas for different step sizes during convolution. Values must match, or you get crashes/bad results
convstep = (3,1)
pool = 1'''
convs = (28, 12)
convstep = (1, 1)
pool = 2
learningRate = 1e-7
epochs = 10000

training_data_full, validation_data_full, test_data_full = mnist_loader.load_data_wrapper()
#print(len(training_data), len(test_data))

training_data_temp = training_data_full[:trainSize]
test_data_temp = training_data_full[:trainSize]
real_test_data = test_data_full[:testSize]

#filters = [[[-1, -1, -1],[0,0,0],[1,1,1]],[[-1, -1, -1],[0,0,0],[1,1,1]],[[-1, -1, -1],[0,0,0],[1,1,1]],[[-1, -1, -1],[0,0,0],[1,1,1]]]
'''filters = np.array([-1,-1,-1,0,0,0,1,1,1,
                    1,1,1,0,0,0,-1,-1,-1,
                    1,0,-1,1,0,-1,1,0,-1,
                    -1,0,1,-1,0,1,-1,0,1,
                    -1,-1,0,-1,0,1,0,1,1,
                    0,-1,-1,1,0,-1,1,1,0,
                    1,1,0,1,0,-1,0,-1,-1,
                    0,1,1,-1,0,1,-1,-1,0])'''
Example 46
    def __init__(self, learning_rate=5.0, epochs_time=10):
        self.net = network.Network([784, 30, 10])
        training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
        self.net.SGD(training_data, epochs_time, 10, learning_rate)
def Main():
    #Load MNIST ****************************************************************

    #Use mnist.pkl.gz(50000 data) Accuracy 0.96
    #Use mnist_expanded.pkl.gz(250000 data) Accuracy 0.97
    fn = "..\\data\\mnist.pkl.gz"  #".datamnist_expanded.pkl.gz"
    lstTrain, lstV, lstT = mnist_loader.load_data_wrapper(fn)
    lstTrain = list(lstTrain)
    lstV = list(lstV)
    lstT = list(lstT)

    path = "..\\TmpLogs\\"
    if not os.path.isdir(path):
        os.mkdir(path)

    fnNetworkData = "{}{}_DNN_Digit".format(path, rn.RvNeuralNetwork.__name__)
    fnNetworkData1 = ""

    # Hyper parameters -------------------------------------------
    loop = 10  # loop effect: 10 and 30 both reach above 0.95
    stepNum = 10  # stepNum effect: 10 -> 0.9, 100 -> 0.5
    learnRate = 0.1  # learnRate and lmbda affect each other
    lmbda = 5.0  # lmbda (regularization) counters overfitting

    sTrain = "y"

    # Training ***********************************************************
    LoadAndTrain = ri.Ask_YesNo("Load exist model and continue training?", "n")

    if LoadAndTrain:
        fns, shortFns = rfi.Get_FilesInFolder(".\\NetData\\", [".dnn"])
        aId = ri.Ask_SelectItem("Select DNN Network file", shortFns, 0)
        fn1 = fns[aId]
        net = rn.RvNeuralNetwork(fn1)
        sTrain = "n"
        initialWeiBias = False

    else:
        """
        [784,50,10], loop=100, 0.9725
        """
        # Create RvNeuralNetwork ----------------------------------------------
        inputNeusNum = len(lstTrain[0][0])
        lyr1NeuNum = 50
        lyr2NeuNum = len(lstTrain[0][1])

        lyrsNeus = [inputNeusNum, lyr1NeuNum]
        lyrsNeus = ri.Ask_Add_Array_Int("Input new layer Neurons num.",
                                        lyrsNeus, lyr1NeuNum)
        lyrsNeus.append(lyr2NeuNum)

        #net = rn.RvNeuralNetwork( \
        #   rn.RvNeuralNetwork.LayersNeurons_To_RvNeuralLayers(lyrsNeus))
        #net = rn.RvNeuralNetwork.Class_Create_LayersNeurons(lyrsNeus)
        net = rn.RvNeuralNetwork(lyrsNeus)  # ([784,50,10])
        initialWeiBias = True

    digitIdOnly = 5
    digitIdOnly = ri.Ask_Input_Integer(
        "Train which Digit (0~9, -1 = All Digits): ", digitIdOnly)

    # Training ***********************************************************
    DoTrain = ri.Ask_YesNo("Do Training?", sTrain)
    if DoTrain:
        fnNetworkData = "{}_{}Lyr".format(fnNetworkData, len(net.NeuralLayers))

        net.DoPloatWeights = ri.Ask_YesNo("Plot Neurons Weights?", 'n')

        # Ask activation method ----------------------------------
        enumActivation = ri.Ask_Enum("Select Activation method.",
                                     nm.EnumActivation,
                                     nm.EnumActivation.afReLU)
        for lyr in net.NeuralLayers:
            lyr.ClassActivation, lyr.ClassCost = \
            nm.Get_ClassActivation(enumActivation)

        net.NetEnableDropOut = ri.Ask_YesNo("Execute DropOut?", "n")
        if net.NetEnableDropOut:
            enumDropOut = ri.Ask_Enum("Select DropOut Method.",
                                      nm.EnumDropOutMethod,
                                      drpOut.eoSmallActivation)
            rn.gDropOutRatio = ri.Ask_Input_Float("Input DropOut ratio.",
                                                  rn.gDropOutRatio)
            net.Set_DropOutMethod(enumDropOut, rn.gDropOutRatio)

        # Auto-calculate proper hyper parameters ---
        DoEvaluate_ProperParams = ri.Ask_YesNo(
            "Auto-calculate proper hyper parameters?", "n")
        if DoEvaluate_ProperParams:
            loop, stepNum, learnRate, lmbda = rf.Evaluate_BestParam_lmbda(
                net, net.Train, lstTrain[:1000], lstV[:500], loop, stepNum,
                learnRate, lmbda)
            loop, stepNum, learnRate, lmbda = rf.Evaluate_BestParam_learnRate(
                net, net.Train, lstTrain[:1000], lstV[:500], loop, stepNum,
                learnRate, lmbda)
        else:
            loop, stepNum, learnRate, lmbda = rf.Ask_Input_SGD(
                loop, stepNum, learnRate, lmbda)

        print(
            "Hyper parameters: Loop({}), stepNum({}), learnRate({}), lmbda({})\n"
            .format(loop, stepNum, learnRate, lmbda))

        # Start Training-
        keepTraining = True
        while (keepTraining):
            start = time.time()

            # Start Training-
            net.Train(lstTrain,
                      loop,
                      stepNum,
                      learnRate,
                      lstV,
                      lmbda,
                      initialWeiBias,
                      trainOnlyDigit=digitIdOnly)
            initialWeiBias = False

            dT = time.time() - start

            fnNetworkData1 = rf.Save_NetworkDataFile(net, fnNetworkData, loop,
                                                     stepNum, learnRate, lmbda,
                                                     dT, ".dnn")

            keepTraining = ri.Ask_YesNo("Keep Training?", "y")

    # Prediction ------------------------------------------------
    if (not os.path.isfile(fnNetworkData1)):
        fnNetworkData1= ".\\NetData\\{}_DNN_Digit.dnn". \
            format(rn.RvNeuralNetwork.__name__)

    # Ask DoPredict----------------------------------------------------
    DoPredict = True
    #    if DoTraining: DoPredict = ri.Ask_YesNo("Predict digits?", "n")
    #    else: DoPredict = True

    if DoPredict:
        rn.Debug_Plot = True  #ri.Ask_YesNo("Plot Digits?", "n")
        #    Predict_Digits(net, lstT)
        rf.Predict_Digits_FromNetworkFile(fnNetworkData1, lstT, rn.Debug_Plot)
Example 48
        delta = d_cost(activations[-1],
                       np.array(output).reshape(len(output), 1)) * d_sigmoid(
                           zs[-1])
        nudge_biases[-1] = delta
        nudge_weights[-1] = np.dot(delta, activations[-2].transpose())

        for i in range(2, self.num_layers):
            delta = np.dot(self.weights[-i + 1].transpose(),
                           delta) * d_sigmoid(zs[-i])
            nudge_biases[-i] = delta
            nudge_weights[-i] = np.dot(delta, activations[-i - 1].transpose())

        return nudge_biases, nudge_weights


random.seed(0)


def rand():
    return random.gauss(0, 1)


training_data, validation_data, test_data = [
    list(x) for x in mnist_loader.load_data_wrapper()
]

net = Network(784, 30, 10)
net.gradient_descent(training_data, 30, 10, 3.0, test_data)

print(2 + 2)
Example 49
import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
import network2

# net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
# net.large_weight_initializer()
# # net.SGD(list(training_data)[:1000], 400, 10, 0.5,
# #         evaluation_data=test_data, lmbda = 0.1,
# #         monitor_evaluation_cost=True, monitor_evaluation_accuracy=True,
# #         monitor_training_cost=True, monitor_training_accuracy=True)
#
# # net.SGD(training_data, 30, 10, 0.5,
# #         evaluation_data=test_data, lmbda = 5.0,
# #         monitor_evaluation_accuracy=True, monitor_training_accuracy=True)

net = network2.Network([784, 100, 10], cost=network2.CrossEntropyCost)
net.large_weight_initializer()
net.SGD(training_data,
        30,
        10,
        0.1,
        lmbda=5.0,
        evaluation_data=validation_data,
        monitor_evaluation_accuracy=True)

# print("Sal!!!!!!!!!!!!")
#
# net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
# net.SGD(training_data, 30, 10, 0.1, lmbda = 5.0,
#         evaluation_data=validation_data,
Example 50
"""
    Testing code for different neural network configurations.
    Adapted for Python 3.5.2
    Usage in shell:
        python3.5 test.py
    Network (network.py) parameters:
        2nd param is epochs count
        3rd param is batch size
        4th param is learning rate (eta)
    """

# ----------------------
# - read the input data:
import mnist_loader
from expnd_dataset.expand_mnist import expand_mnist
expand_mnist()
training_data, validation_data, test_data = mnist_loader.load_data_wrapper(
    filename="mnist_expanded.pkl.gz")
training_data = list(training_data)
# ---------------------
# - network.py example:
import network

save_model = "mnist_expanded.json"
net = network.Network([784, 30, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
net.save(save_model)
#net = network.load(save_model)
Example 51
        """
        梯度下降方法
        :type training_data: tuples(x, y)
        :param training_data: 输入,训练数据
        :param epochs: 轮数,one epoch = numbers of iterations = N = 训练样本的数量/batch size
        :param mini_batch_size: batch大小
        :param eta: 学习率
        :param test_data: 测试数据,若传入,则在每一轮(epoch)结束后用该测试数据评价一次网络
        :return: 输出tuples(x, y)
        """
        n_test = None
        if test_data:
            n_test = len(test_data)
        n = len(training_data)
        for j in range(1, epochs + 1):
            random.shuffle(training_data)
            mini_batches = [training_data[k:k + mini_batch_size] for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print('Epoch {}: {} / {}'.format(j, self.evaluate(test_data), n_test))
            else:
                print('Epoch {} complete'.format(j))


if __name__ == '__main__':
    from mnist_loader import load_data_wrapper
    training_dat, validation_dat, test_dat = load_data_wrapper()
    net = Network([784, 30, 10])
    net.sgd(training_dat, 30, 10, 1.0, test_dat)
Example 52
'''
    Most of this code is based on the code of the original author:
    Michał Dobrzański, 2016
    [email protected]
    Nevertheless, many changes and adjustments were made as described in the report.
'''
import mnist_loader
import network2

training_data, validation_data, test_data, input_dimension = mnist_loader.load_data_wrapper()

#this is done to save time in training
#mnist_loader.save(training_data, validation_data, test_data, input_dimension, 'PCA_data') #save dim_reduced data
#training_data, validation_data, test_data, input_dimension = mnist_loader.load('PCA_data') #load the data

training_data = list(training_data)
validation_data = list(validation_data)

input_layer = input_dimension
second_layer = 30
third_layer = 30
fourth_layer = 16
fifth_layer = 16
last_layer = 10
epochs = 5
batch_size = 1
eta = 0.1
lmbda = 5.0
n = len(training_data[:50000])
evaluation_data = validation_data[:10000]  #could also be test_data
Example 53
import matplotlib.cm as cm

# private libraries---------------------------------------------
import mnist_loader
import RvNeuralNetworks as rn
from RvNeuralNetworks import *
import RvAskInput as ri
import RvMiscFunctions as rf
import RvActivationCost as ac
import PlotFunctions as pltFn

#%%

# Using mnist_expanded.pkl.gz (250,000 samples) raises accuracy to 0.97
fn = "..\\data\\mnist.pkl.gz"  # "..\\data\\mnist_expanded.pkl.gz"
lstTrain, lstV, lstT = mnist_loader.load_data_wrapper(fn)
lstTrain = list(lstTrain)
lstV = list(lstV)
lstT = list(lstT)

fnNetworkData1 = ".\\{}_NetData_DontDelete.txt".format(
    rn.RvNeuralNetwork.__name__)
fnNetworkData2 = ".\\{}_NetData_DropOut.txt".format(
    rn.RvNeuralNetwork.__name__)
fnNetworkData3 = ".\\{}_NetData_CnvLyr.txt".format(rn.RvNeuralNetwork.__name__)
#
accur1, t1 = pltFn.Predict_Digits_FromNetworkFile(fnNetworkData1, lstT, False)
accur2, t2 = pltFn.Predict_Digits_FromNetworkFile(fnNetworkData2, lstT, False)
accur3, t3 = pltFn.Predict_Digits_FromNetworkFile(fnNetworkData3, lstT)
print("FullConnected Layer:\n  Accu:{}, Time:{:.3} sec\n".format(accur1, t1))
print("DropOut Layer:\n  Accu:{}, Time:{:.3} sec\n".format(accur2, t2))
Example 54
File: digit.py Project: ojobabs/ML
    # Make those columns into an array of 8-bit pixels
    # This array will be 1-D with length 784
    # The pixel intensity values are integers from 0 to 255
    pixels = np.array(pixels, dtype='uint8')

    # Reshape the array into 28 x 28 array (2-dimensional array)
    pixels = pixels.reshape((28, 28))

    # Plot
    plt.title('Label is {label}'.format(label=label))
    plt.imshow(pixels, cmap='gray')
    plt.show()


# Using MNIST
train_data, val_data, test_data = mnist_loader.load_data_wrapper()

# ugly patch for changing the format in which data is stored in the array
trans = zip(*test_data[0:20])

samples = trans[0]
s_list = []

for i in range(len(samples)):
    flatten = [val for sublist in samples[i] for val in sublist]
    s_list.append(flatten)

samples = s_list
y_vec = trans[1]

# example use to show the numbers in class
Example 55
# module: ai_f18_hw06.py
# Kody Sanchez
# A01514541
###############################

import numpy as np
import pickle as cPickle
from sklearn import svm, datasets, metrics
from sklearn.model_selection import train_test_split
import random

from mnist_loader import load_data_wrapper

## load MNIST
# load_data_wrapper returns (training, validation, test) in that order
mnist_train_data, mnist_valid_data, mnist_test_data = \
                  load_data_wrapper()

# define numpy arrays for MNIST data; dc stands for
# data conversion.
mnist_train_data_dc = np.zeros((50000, 784))
mnist_test_data_dc = np.zeros((10000, 784))
mnist_valid_data_dc = np.zeros((10000, 784))

# define numpy arrays for MNIST targets
mnist_train_target_dc = None
mnist_test_target_dc = None
mnist_valid_target_dc = None


# here is how we reshape mnist data for sklearn decision trees.
def reshape_mnist_d(mnist_data, mnist_data_dc):
    # Minimal completion: flatten each (784, 1) input vector into row i of
    # the pre-allocated conversion array.
    for i in range(len(mnist_data)):
        mnist_data_dc[i] = mnist_data[i][0].reshape(784)
def Main():
    #Load MNIST ****************************************************************

    #Use mnist.pkl.gz(50000 data) Accuracy 0.96
    #Use mnist_expanded.pkl.gz(250000 data) Accuracy 0.97
    fn = "..\\data\\mnist.pkl.gz"  #".datamnist_expanded.pkl.gz"
    lstTrain, lstV, lstT = mnist_loader.load_data_wrapper(fn)
    lstTrain = list(lstTrain)
    lstV = list(lstV)
    lstT = list(lstT)

    # Load digit Images  lstTrain[0].shape = (pxls, label) = (784, 1)
    #    imgPxls = lstTrain[0][0].shape[0]
    #    digitImages = rn.RvNeuralEnDeCoder.Load_DigitImages( ".\\Images\\Digits\\", imgPxls)

    path = "..\\TmpLogs\\"
    rfi.ForceDir(path)

    fnNetworkData = "{}{}_EDC".format(path, rn.RvNeuralEnDeCoder.__name__)

    # Hyper parameters -------------------------------------------
    loop = 10  # loop effect: 10 and 30 both reach above 0.95
    stepNum = 10  # stepNum effect: 10 -> 0.9, 100 -> 0.5
    learnRate = 0.1  # learnRate and lmbda affect each other
    lmbda = 5.0  # lmbda (regularization) counters overfitting

    endecoder = None

    sTrain = "y"

    # Training ***********************************************************
    # Ask whether to run training
    LoadAndTrain = ri.Ask_YesNo("Load existing model and continue training?", "n")

    if LoadAndTrain:
        digitIdOnly = -1
        fns, shortFns = rfi.Get_FilesInFolder(".\\NetData\\", [".endecoder"])
        aId = ri.Ask_SelectItem("Select EnDecoder file", shortFns, 0)
        fn1 = fns[aId]
        endecoder = rn.RvNeuralEnDeCoder(fn1)
        initialWeights = False
        sTrain = "n"
        encoder, decoder = endecoder.Get_Encoder_Decoder()

    else:
        """
        [784,50,10], loop=100, 0.9725
        """
        #buildDeNoiseModel = ri.Ask_YesNo("Build DeNoise Model?", "n")

        # Create RvNeuralEnDeCoder----------------------------------------------
        inputNeusNum = len(lstTrain[0][0])
        #lyr2NeuNum = len(lstTrain[0][1])

        # [784,256,128,10] is suggested ----------------------------
        # [784, 256, 128, 10, 128, 256, 784 ] -> 0.9395 ... tested 10 epochs
        # [784, 400, 20, 400, 784] -> 0.9526 ... tested 5 epochs
        lyrsNeus = [inputNeusNum, 50]  # 512, 256,128]
        lyrsNeus = ri.Ask_Add_Array_Int(\
            "Input new layer Neurons num.", lyrsNeus, 50)

        bottleneckNeuNum = ri.Ask_Input_Integer(\
            "Input BottleNeck(Code) Layer Neurons num.", 10)
        lyrsNeus.append(bottleneckNeuNum)
        for nNeu in reversed(lyrsNeus[1:-1]):
            lyrsNeus.append(nNeu)
        lyrsNeus.append(inputNeusNum)

        digitIdOnly = -1
        digitIdOnly = ri.Ask_Input_Integer(
            "Build only Digit (0~9, -1 = All Digits): ", digitIdOnly)

        #net = RvNeuralEnDeCoder(
        #   RvNeuralEnDeCoder.LayersNeurons_To_RvNeuralLayers(lyrsNeus))
        #net = RvNeuralEnDeCoder.Class_Create_LayersNeurons(lyrsNeus)
        endecoder = rn.RvNeuralEnDeCoder(lyrsNeus)  # ([784,50,10])
        initialWeights = True

    # Training ***********************************************************
    DoTrain = ri.Ask_YesNo("Do Training?", sTrain)
    if DoTrain:
        fnNetworkData = "{}_{}Lyr".format(fnNetworkData,
                                          len(endecoder.NeuralLayers))

        endecoder.DoPloatWeights = ri.Ask_YesNo("Plot Neurons Weights?", 'n')

        # Ask for the activation method ---------------------------------
        enumActivation = ri.Ask_Enum("Select Activation method.",
                                     nm.EnumActivation,
                                     nm.EnumActivation.afSigmoid)
        for lyr in endecoder.NeuralLayers:
            lyr.Set_EnumActivation(enumActivation)

        endecoder.NetEnableDropOut = ri.Ask_YesNo("Execute DropOut?", "n")
        if endecoder.NetEnableDropOut:
            enumDropOut = ri.Ask_Enum("Select DropOut Method.",
                                      nm.EnumDropOutMethod,
                                      drpOut.eoSmallActivation)
            rn.gDropOutRatio = ri.Ask_Input_Float("Input DropOut ratio.",
                                                  rn.gDropOutRatio)
            endecoder.Set_DropOutMethod(enumDropOut, rn.gDropOutRatio)

        # Auto-calculate proper hyper parameters ---
        DoEvaluate_ProperParams = ri.Ask_YesNo(
            "Auto-calculate proper hyper parameters?", "n")
        if DoEvaluate_ProperParams:
            loop, stepNum, learnRate, lmbda = rf.Evaluate_BestParam_lmbda(
                endecoder, endecoder.Train, lstTrain[:1000], lstV[:500], loop,
                stepNum, learnRate, lmbda)
            loop, stepNum, learnRate, lmbda = rf.Evaluate_BestParam_learnRate(
                endecoder, endecoder.Train, lstTrain[:1000], lstV[:500], loop,
                stepNum, learnRate, lmbda)
        else:
            loop, stepNum, learnRate, lmbda = rf.Ask_Input_SGD(
                loop, stepNum, learnRate, lmbda)

        print(
            "Hyper parameters: loop({}), stepNum({}), learnRate({}), lmbda({})\n"
            .format(loop, stepNum, learnRate, lmbda))

        # Start training
        keepTraining = True
        while (keepTraining):
            start = time.time()

            encoder, decoder = endecoder.Build_Encoder_Decoder( \
              lstTrain, loop, stepNum, learnRate, lmbda, initialWeights, digitIdOnly)
            initialWeights = False

            dT = time.time() - start

            rf.Save_NetworkDataFile(endecoder, fnNetworkData, loop, stepNum,
                                    learnRate, lmbda, dT, ".endecoder")
            fn1 = rf.Save_NetworkDataFile(encoder,
                                          "{}_Encoder".format(fnNetworkData),
                                          loop, stepNum, learnRate, lmbda, dT,
                                          ".encoder")
            fn2 = rf.Save_NetworkDataFile(decoder,
                                          "{}_Decoder".format(fnNetworkData),
                                          loop, stepNum, learnRate, lmbda, dT,
                                          ".decoder")

            keepTraining = ri.Ask_YesNo("Keep Training?", "y")

        decoder = rn.RvNeuralEnDeCoder(fn2)
        encoder = rn.RvNeuralEnDeCoder(fn1)

    noiseStrength = ri.Ask_Input_Float("Input Noise Strength.", 0.7)
    rf.Test_Encoder_Decoder(encoder, decoder, lstT, 10, "", noiseStrength)
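
# The example defines Main() but never calls it; a standard entry-point guard
# (an assumption, not in the original snippet) would be:
if __name__ == "__main__":
    Main()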
Esempio n. 57
def main():
    config = configparser.ConfigParser()
    config.read('config_mcsample.ini')

    # select the dataset configured in config_mcsample.ini
    train_size = int(config['DATA']['train_size'])
    test_size = int(config['DATA']['test_size'])

    if config['DATA']['dataset'] == "simulated":
        num_cov = int(config['DATA']['num_cov'])
        mu = float(config['DATA']['mu'])
        std = float(config['DATA']['std'])
        range_cov = float(config['DATA']['range_cov'])
        range_coef = float(config['DATA']['range_coef'])
        range_bias = float(config['DATA']['range_bias'])
        generator = generate_data.GenerateData(
            num_cov, mu, std, range_cov, range_coef, range_bias,
            seed=100)  # consider moving this seed into the config file
        X_train, y_train, _ = generator.generate(seed=15,
                                                 sample_size=train_size)
        X_test, y_test, _ = generator.generate(seed=16, sample_size=test_size)
        network_mcsample_plots(X_train, X_test, y_train, y_test,
                               config['RS NN PARAMS'], config['MCSAMPLE'])

    if config['DATA']['dataset'] == "mnist":
        # the validation set is not needed here
        train_full, validate_full, test_full = mnist_loader.load_data_wrapper()
        X_train = np.array(train_full[0][:train_size])
        y_train = np.array(train_full[1][:train_size])
        X_test = np.array(test_full[0][:test_size])
        y_test = np.array(test_full[1][:test_size])
        network_mcsample_plots(X_train, X_test, y_train, y_test,
                               config['RS NN PARAMS'], config['MCSAMPLE'])

    if config['DATA']['dataset'] == "iris":
        from sklearn import datasets
        from sklearn.model_selection import KFold
        fold = True
        data = datasets.load_iris()
        y_train = pd.get_dummies(data.target).values
        X_train = data.data
        splits = int(config['DATA']['splits'])  # number of cross-validation folds
        kf = KFold(splits)
        Xtr = []
        Xtest = []
        ytr = []
        ytest = []
        for train_index, test_index in kf.split(X_train):
            # train
            # print("TRAIN:", train_index, "TEST:", test_index)
            Xtr.append(X_train[train_index])
            ytr.append(y_train[train_index])
            # test
            Xtest.append(X_train[test_index])
            ytest.append(y_train[test_index])
        network_mcsample_plots(Xtr,
                               Xtest,
                               ytr,
                               ytest,
                               config['RS NN PARAMS'],
                               config['MCSAMPLE'],
                               fold=fold)
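
# For reference, a config_mcsample.ini sketch consistent with the keys read
# above (values are illustrative assumptions; the [RS NN PARAMS] and
# [MCSAMPLE] contents depend on network_mcsample_plots and are left empty):
#
#   [DATA]
#   dataset = mnist
#   train_size = 1000
#   test_size = 200
#   num_cov = 5
#   mu = 0.0
#   std = 1.0
#   range_cov = 1.0
#   range_coef = 1.0
#   range_bias = 1.0
#   splits = 5
#
#   [RS NN PARAMS]
#
#   [MCSAMPLE]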
"""
Python

Created by Zhixuan Wang  01/29/2015 14:11

this script uses the method provided to train and predict

"""

# Libraries
import mnist_loader as loader
import network as nw

training_data, validation_data, test_data = loader.load_data_wrapper()

net = nw.Network([784, 100, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
Esempio n. 59
# Header and setup reconstructed (the name format_data is assumed; the
# original snippet begins mid-function). The helper flattens each (x, y)
# pair of MNIST column vectors into plain Python lists.
def format_data(raw_data):
	finalData = []
	length = len(raw_data)
	for i in xrange(length):
		# print('i {}'.format(i))
		x = raw_data[i][0]
		y = raw_data[i][1]
		x_final = []
		y_final = []
		x_len = len(np.atleast_1d(x))
		y_len = len(np.atleast_1d(y))
		for k in xrange(x_len):
			x_k = x[k][0]
			x_final.append(x_k)

		for j in xrange(y_len):
			y_j = y[j][0]
			y_final.append(y_j)
		finalData.append((x_final, y_final))

	print('Formatting ended')
	return finalData
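
# Usage sketch for the reconstructed helper above (assumed; the original
# snippet never shows the call site):
#   flat_training_data = format_data(training_data)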

if __name__ == '__main__':

	print('Loading starts')
	training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
	print('Loading ended')

	network = MultiLayerNetwork([784, 30, 10])

	network.SGD(training_data, 30, 10, 3.0, test_data=test_data)
Esempio n. 60
def test():
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = Network([784,30,10])
    # pass test_data so SGD reports per-epoch accuracy
    net.SGD(training_data, 30, 10, 3.0, test_data=test_data)