Exemple #1
0
def main():
    """Read an image path from stdin, find its face landmarks and show the
    closest match from the wiki_crop data set."""
    print("Image:")
    image = input("")

    # No detectable face -> nothing to match against; stop quietly.
    features = finding_face_landmark.finding_face_landmark(image)
    if not len(features):
        exit(0)

    # Load the feature data set and scale it to the training range.
    X, Y, Q = utils.get_data("features.csv", 2000)
    x_min, x_max = utils.get_min_max(X)
    X = utils.normalize_features(x_min, x_max, X)

    T, P, L = utils.get_data_test("test.csv", x_min, x_max, len(X), Q, Y)

    neural_network = n.Neural_Network(X, Y, './my_test_model.ckpt')
    # neural_network.training()
    # neural_network.test(T,P)

    # Scale the query features with the same min/max as the training set.
    features = utils.normalize_features(x_min, x_max, features)

    predict = neural_network.predict([features])
    image_path = Q[predict][0].strip()

    # Look the matched image's name up in the wiki metadata file.
    name = utils.get_name(image_path, 'C:\\ProjekatSoft\\wiki_crop\\wiki.mat')

    percent = utils.get_percent(features, X[predict:predict + 1, :15][0])
    utils.show_image('C:\\ProjekatSoft\\wiki_crop\\' + image_path, name,
                     percent)
Exemple #2
0
def ns_loop(data, answers, zet_path):
    """Console menu loop: train, save, load, run and inspect the network.

    Returns when the user picks option 0.
    """
    model = neural_network.Neural_Network()
    menu = ("1. Обучить нейронную сеть.\n"
            "2. Сохранить нейронную сеть.\n"
            "3. Загрузить нейронную сеть.\n"
            "4. Запуск\n"
            "5. Графики\n"
            "6. TT\n"
            "0. Назад.\n")
    while True:
        os.system('cls')
        if model.learned:
            model.model.summary()
        print('Сеть обучена:', model.learned)
        choice = int(input(menu))
        if choice == 0:
            return
        if choice == 1:
            model.fit_model(data, answers)
        elif choice == 2:
            model.save_neural_network()
        elif choice == 3:
            model.load_neural_network()
        elif choice == 4:
            # NOTE: `app` is resolved from the enclosing module's scope.
            model.work_neural_network(app, zet_path)
        elif choice == 5:
            model.plotting()
        elif choice == 6:
            model.test()
Exemple #3
0
    def __init__(self, ai):
        """Create one hero per genome produced by the AI's generation manager."""
        self.heros = []
        self.ai = ai
        # One fresh network per genome, configured from `config.network`.
        for genome in self.ai.manager.create_Generation():
            net = neural_network.Neural_Network(config.network[0],
                                                config.network[1],
                                                config.network[2])
            net.setNetwork(genome)
            self.heros.append(Hero(net))
Exemple #4
0
 def __first_generation(self):
     """Build the first generation.

     Returns the neural-network data of every individual: one freshly
     initialised network per population slot (config.population of them).
     """
     return [
         neural_network.Neural_Network(config.network[0],
                                       config.network[1],
                                       config.network[2]).getNetwork()
         for _ in range(config.population)
     ]
Exemple #5
0
def test_dropout_shake():
    """__dropout_shake(False) must rebuild the dropout masks as identity-like
    matrices regardless of the mask's previous contents."""
    dnn = nn.Neural_Network([3, 4, 2], [0, 0.9, 0])
    # Pre-seed the first mask with junk; the shake must overwrite it.
    dnn.do[0] = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1]])
    dnn._Neural_Network__dropout_shake(False)
    assert np.array_equal(dnn.do[0],
                          np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
    assert np.array_equal(dnn.do[1], np.identity(4))
def main(args=None):
    """Train a neural network on the module-level data set (d.x / d.y) and
    print prediction vs. actual for every held-out test sample.

    Args:
        args: optional argv list; defaults to sys.argv[1:] (currently unused
              beyond normalisation).
    """
    if args is None:
        args = sys.argv[1:]

    # gather data set
    dataX = d.x
    dataY = d.y

    # BUG FIX: the original shuffled dataX and dataY independently, which
    # destroys the pairing between each input and its label.  Shuffle both
    # with one shared permutation so every sample keeps its target.
    perm = np.random.permutation(len(dataX))
    dataX = [dataX[i] for i in perm]
    dataY = [dataY[i] for i in perm]

    # split up the data set: first half trains, the rest tests
    training_split = .5
    train_x_index = int(math.floor(len(dataX) * training_split))
    train_y_index = int(math.floor(len(dataY) * training_split))

    # training data
    trainX = np.array(tuple(dataX[:train_x_index]), dtype=float)
    trainY = np.array(tuple(dataY[:train_y_index]), dtype=float)

    # testing data -- FIX: both X and Y now start at the train/test boundary;
    # the old `floor(len * (1 - split))` start index only coincided with the
    # boundary for the hard-coded 50% split (identical behavior at 0.5).
    testX = np.array(tuple(dataX[train_x_index:]), dtype=float)
    testY = np.array(tuple(dataY[train_y_index:]), dtype=float)

    # normalize inputs column-wise by each feature's maximum
    trainX = trainX / np.amax(trainX, axis=0)
    testX = testX / np.amax(testX, axis=0)

    # number of input/output neurons follows the data width
    inputs = len(trainX[0])
    outputs = len(trainY[0])

    # train neural net
    NN = nn.Neural_Network(inputs, outputs, Lambda=0.001)
    T = t.trainer(NN)
    T.train(trainX, trainY, testX, testY)

    # print prediction vs. actual (and their delta) per test sample
    results = NN.forward(testX)
    for i, r in enumerate(results):
        line = "Prediction: " + str("{:4.2f}").format(r[0]) + "; "
        line += "Actual: " + str("{:4.2f}").format(testY[i][0]) + "; "
        line += "Delta: " + str("{:4.2f}").format(testY[i][0] - r[0]) + "; "
        print(line)
Exemple #7
0
def test_main_or_without_dropout():
    """Smoke test: training a small net without dropout must run cleanly."""
    datasetsize, batchsize = 8, 4
    td_num = tl_num = 2
    # all-ones inputs and all-zero labels are enough for a smoke run
    trainData = np.ones([datasetsize, batchsize, td_num])
    trainLabel = np.zeros([datasetsize, batchsize, tl_num])
    # build the network; the input layer gets one extra unit (td_num + 1)
    myNN = nn.Neural_Network([td_num + 1, 3, tl_num])
    for _ in range(10):  # epochs
        myNN.train(trainData, trainLabel)
Exemple #8
0
def test_forwordpropagation():
    """Forward pass with the deterministic "test" weight init must reproduce
    the precomputed last-layer activations for an all-ones batch."""
    batchsize = 4
    td_num = tl_num = 2
    # every row of the batch is identical, so every output row is too
    expected = np.array([[0.51301977, 0.52648607]] * batchsize)
    trainData = np.ones([batchsize, td_num])
    myNN = nn.Neural_Network([td_num + 1, 3, tl_num], w_method="test")
    myNN.forwardpropagation(trainData, batchsize)
    assert np.allclose(
        myNN.z[-1],
        expected), "dnn/neural_network/forwardpropagaiton is output error"
    def game_init(self):
        """Reset the game state and build the hero from weights saved on disk."""
        self.Running = True
        self.line = Line()

        # Load the saved weight vector and copy it into a fresh network —
        # but only when the saved length matches the current topology.
        weight_array = np.loadtxt("./res/my_modle.csv")
        network = neural_network.Neural_Network(config.network[0],
                                                config.network[1],
                                                config.network[2])
        data = network.getNetwork()
        if len(data['weights']) == len(weight_array):
            for i, saved in enumerate(weight_array):
                data['weights'][i] = saved
        network.setNetwork(data)
        self.hero = Hero(network)

        self.score = Score()
        self.obstacal_manager = ObstacleManager(self.surface, self.score,
                                                self.hero)
def train(path_to_images, csv_file):
    '''Preprocess the input frames and train a steering-angle network.

    Args:
        path_to_images: path to the directory of jpg image files.
        csv_file: path/filename of a csv with frame numbers and steering angles.

    Returns:
        NN: trained Neural_Network object.
    '''
    
    # Import Steering Angles CSV: column 0 = frame number, column 1 = angle.
    data = np.genfromtxt(csv_file, delimiter = ',')
    frame_nums = data[:,0]
    steering_angles = data[:,1]
    
    # Build the flattened grayscale training images.
    im_train_X = []
    for frame_number in range(len(frame_nums)):
        # NOTE(review): this uses the loop counter as the file name, not
        # frame_nums[frame_number] — correct only if the csv lists frames
        # exactly 0..N-1 in order; verify against the data set.
        temp = cv2.imread(path_to_images + '/' + str(int(frame_number)).zfill(4) + '.jpg')
        temp_resized = cv2.resize(temp, (60, 64))
        temp_crop = crop_center(temp_resized)
        temp_lane = select_rgb_white_yellow(temp_crop)
        temp_gray = convert_to_grayscale(temp_lane)
        im_train_X.append(list(temp_gray.ravel()))

    # Smooth the angles (Savitzky-Golay, window 3, order 2), then encode
    # them with the project's encoder() helper for training targets.
    rawangles = steering_angles
    steering_angles = signal_sci.savgol_filter(steering_angles, 3, 2)
    encoded_angles = encoder(steering_angles)

    trainX = np.array(im_train_X)
    trainY = encoded_angles
    trainX = trainX / 255  # scale pixel values to [0, 1]
    
    # The network is constructed from the raw (unsmoothed) angles.
    NN = ann.Neural_Network(rawangles)
    T = ann.trainer(NN)
    
    T.train(trainX,trainY)
    
    return NN
Exemple #11
0
    def create_next_generation(self):
        """Produce the next generation's network data.

        Elites are copied unchanged, a share of brand-new random individuals
        is added, and the rest is filled by breeding parents drawn from the
        fitter and weaker halves of the current genomes.
        """
        # 1. carry the elite individuals over directly
        network_data_list = [
            self.genomes[i].data
            for i in range(round(config.population * config.elite))
        ]
        # 2. inject freshly initialised random individuals
        for _ in range(round(config.population * config.new_bron)):
            fresh = neural_network.Neural_Network(config.network[0],
                                                  config.network[1],
                                                  config.network[2])
            network_data_list.append(fresh.getNetwork())

        # 3. breed children (top half x bottom half) until the list is full
        half = round(config.population / 2)
        while len(network_data_list) != config.population:
            father = self.genomes[random.randint(0, half - 1)]
            mother = self.genomes[random.randint(half,
                                                 config.population - 1)]
            network_data_list.append(self.breed(father, mother).data)
        return network_data_list
#model.add(layers.Dense(512, activation='relu', input_shape=(8, )))
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dense(1, activation='sigmoid'))

#model.compile(optimizer='Adam',
              #loss='binary_crossentropy',
              #metrics=['accuracy'])
#model.fit(X_train, y_train, epochs=150, batch_size=16)

#Y_pred = model.predict(X_val)
#score=model.evaluate(X_val,y_val,verbose=0)
#nnh.kerasEval(Y_pred,y_val)
#print(score)

# Build, train and evaluate the network, then assemble the submission frame.
model = nn.Neural_Network(X_train, X_val, y_train, y_val, id_train, id_val,
                          nnConfig)
model.initializeWeights()

model.trainNetwork()
# Fraction of parameters left unchanged by training (sanity check).
print(np.mean((model.initialNNParams == model.trainedParams).astype(int)))

# Report accuracy on the train and validation splits.
nnh.predict(model.Xtrain, model.yTrain, model.trainedParamsShaped)
nnh.predict(model.Xval, model.yVal, model.trainedParamsShaped)

# Predict the test split and pair each prediction with its passenger id.
testPred = nnh.testPred(X_test, model.trainedParamsShaped)
print(id_test.shape)
testRes = pd.DataFrame(data=np.column_stack((id_test, testPred)),
                       columns=['PassengerId', 'Survived'])
testRes = testRes.astype(int)


#testRes=testRes.drop(testRes.columns[0],axis=1)
print(testRes)
Exemple #13
0
    # (fragment — the enclosing definition starts before this excerpt and
    # the final loop's body is cut off at the end of it)
    print()
    print(description)
    
    # Build training data: advance the grid frame_count steps with my_rule,
    # collecting training pairs from each step and printing every new grid.
    grids = []
    training_data = []
    grids.append(init_grid)
    for i in range(frame_count):
        grids.append(next_grid(grids[i], my_rule))
        training_data.extend(next_training_data(grids[i], my_rule))
        print_grid(grids[i+1], height, width)

    # One extra step is held out as the prediction target.
    test_points, test_inputs = input_data(grids[frame_count])
    grids.append(next_grid(grids[frame_count], my_rule))

    my_neural_network = nn.Neural_Network(num_inputs, num_outputs, hidden_layer_sizes)

    #
    # Train network
    #

    start_time = time.perf_counter()
    # batch size 10, 10000 iterations — presumably; confirm against
    # train_in_batches' signature.
    my_neural_network.train_in_batches(training_data, 10, 10000, learning_rate)
    print("Training took: ", time.perf_counter() - start_time)

    #
    # Print answer
    #

    prediction_grid = {}
    for point, test_input in zip(test_points, test_inputs):
Exemple #14
0
import preprocess
import neural_network as net
import numpy as np
import time
import csv

# Restore a previously trained network and its data set from disk.
# NN is module-level state: train() below reads and re-saves it.
NN = net.Neural_Network()
NN.W1 = np.loadtxt("saved_data/W1.txt")  # first-layer weights
NN.W2 = np.loadtxt("saved_data/W2.txt")  # second-layer weights
x = np.loadtxt("saved_data/x.txt")
y = np.loadtxt("saved_data/y.txt")


def train():
    """Interactively build a training set of the requested size, train the
    module-level NN with it, and save the first-layer weights.

    NOTE(review): this excerpt may be truncated at the snippet boundary —
    only W1 is saved here and time_taken is never reported.
    """
    time_taken = 0
    training_size = input("What size would you like the training set to be?: ")
    print()
    print("ok, building the training data ...")

    start = time.time()
    # These locals shadow the module-level x/y; the trainer uses the fresh
    # data built here, not the arrays loaded at import time.
    x, y, samples = preprocess.get_training_data(int(training_size))
    T = net.trainer(NN)

    print()
    print("Now training the network.")
    print()
    T.train(x, y)
    end = time.time()
    time_taken = end - start  # wall-clock build+train time, seconds

    np.savetxt("saved_data/W1.txt", NN.W1)
Exemple #15
0
import doc_maker
import unbuffered
import cleaner

# Capture everything written to stdout so it can be embedded in the report.
s, timestamp, commitid, branchname = doc_maker.getdata("dnn")
stdout_stream = io.StringIO()
sys.stdout = unbuffered.Unbuffered(sys.stdout, stdout_stream)

# Build the AND-logic data set.
datasize = 20
batch = 4
logic = "and"
trainData, trainLabel, testData, testLabel = dataset.logic(
    logic, datasize, batch)

# Create the network (the +1 input presumably carries the bias — see dataset).
structure = [2 + 1, 5, 2]
myNN = nn.Neural_Network(structure)

# Train and evaluate for a fixed number of epochs.
epoch = 1000
for _ in range(epoch):
    myNN.train(trainData, trainLabel)
    myNN.test(testData, testLabel)

# Emit the report, plots and weight snapshot, then clean up.
doc_maker.docmaker(s, timestamp, stdout_stream.getvalue(), commitid,
                   branchname)
atool.draw(myNN.cost, timestamp)
atool.accurancygraph(myNN.accurancy, timestamp)
#atool.tdchart(myNN)
npfiles.save(myNN.weight, timestamp)
cleaner.clean()
Exemple #16
0
# WOW

import numpy as np
import neural_network as neurn

# Toy data set: three 2-feature samples and their scalar targets.
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)


# Scale inputs per column by their maxima; targets down to [0, 1].
X = X / np.amax(X, axis=0)
y = y / 100


model = neurn.Neural_Network()

# Sample to predict after training, scaled the same way as the inputs.
xPredicted = np.array(([4, 8]), dtype=float)
xPredicted = xPredicted / np.amax(xPredicted, axis=0)


# V2

# Train for a fixed number of steps, logging the full state every step.
for step in range(100000):
    print("Input: \n" + str(X))
    print("Actual Output: \n" + str(y))
    print("Predicted Output: \n" + str(model.forward(X)))
    print("Loss: \n" + str(np.mean(np.square(y - model.forward(X)))))
    print("\n")
    model.train_AI(X, y)

model.predict(xPredicted)
Exemple #17
0
# Split the engineered features into train / cross-validation via `sample`.
x_train = combined_train[sample].values
y_train = np.array([td.targets[sample].values]).T

x_cv = combined_train[~sample].values
y_cv = np.array([td.targets[~sample].values]).T

# Rows 891+ of the combined frame are the unlabeled test set.
x_test = td.combined[891:].values

print('combined: ', td.combined.shape, ', x_train: ', x_train.shape,
      ', x_cv: ', x_cv.shape, ', x_test: ', x_test.shape)
print('\n\n')

# Single-hidden-layer network with dropout and regularisation, seeded
# for reproducibility.
NN = nn.Neural_Network(dimensionality=x_train.shape[1],
                       hidden_size=400,
                       output_size=1,
                       learning_rate=0.01,
                       dropout_hidden_rate=0.5,
                       do_dropout=True,
                       error_function='cross_entropy',
                       do_regularize=True,
                       regularization_rate=10,
                       add_bias=True,
                       use_nesterov_momentum=False,
                       momentum_rate=0.5,
                       do_random_seed=True,
                       random_seed=1)

#start_time = time.time()

#for i in range(1000):
#    NN.learn_using_gradient_descent(x=x_train,y=y_train,current_iteration=i,\
#    print_loss_every=100,clip=True)

#print('\n\n')
#print("--- %s seconds ---" % (time.time() - start_time))
#print('\n\n')
#print('Accuracy on CV with gradient descent: ', NN.accuracy(x_cv,y_cv, 0.5))
#print('\n\n')
# Remove settings rows whose first column is a single character —
# presumably malformed/placeholder entries; verify upstream.
dummy_rows = [i for i in range(len(settings)) if len(settings[i, 0]) == 1]
settings = np.delete(settings, dummy_rows, axis=0)
print(settings)

# Train one model per remaining settings row and plot its learning curves.
models = []
for i in range(len(settings)):
    hn = settings[i, 0]  # hidden-neuron count
    lr = settings[i, 1]  # learning rate
    # mr = settings[i, 2]

    mr = 'NA'
    print(hn)
    models.append(nn.Neural_Network(size_input, hn))
    models[i].train(X_train, y_train, X_val, y_val, lr, number_of_epochs,
                    batch_size, mr)

    # Upper panel: average loss; lower panel: average binary error.
    plt.figure(i)
    plt.subplot(211)
    plt.title("HN: {0}, LR: {1}, MR: {2}".format(hn, lr, mr))
    # plt.xlabel('Epoch')
    plt.ylabel('Average Loss Value')
    plt.plot(models[i].training_history, 'k-', models[i].validation_history,
             'r-')
    plt.subplot(212)
    plt.plot(models[i].training_history_bin_err, 'b-',
             models[i].validation_history_bin_err, 'r-')
    plt.xlabel('Epoch')
    plt.ylabel('Average Binary Error')
    plt.savefig('figure_' + str(i) + '.svg')
Exemple #19
0
def test_save_weight():
    """Saving a freshly initialised network's weights must not raise."""
    test_nn = nn.Neural_Network([2 + 1, 3, 3])
    npfiles.save(test_nn.weight, "test")
Exemple #20
0
 def get_neural_network(self):
     """Return a new Neural_Network built from this object's objvars."""
     return neural_network.Neural_Network(self.objvars)
Exemple #21
0
import sys

if __name__ == '__main__':
    print 'network training'
    datapath = 'parameter/mnist_dropout/'
    #datapath = 'parameter/init_params/'

    training_data, validation_data, test_data = loader.load_data_wrapper()

    epochs = 50
    mini_batch_size = 1
    learning_rate = 0.01
    dropout_rate = (0.8, 0.9)

    net = network.Neural_Network([784, 30, 10], dropout_rate)
    net.set_test(test_data)
    net.set_validation(validation_data)
    train = True
    #train = False
    if train:
        #net.load_parameter(path=datapath)
        net.train(training_data, epochs, mini_batch_size, learning_rate)
        print 'save parameter? (y/n)'
        if 'y' in sys.stdin.readline():
            net.save_parameter(path=datapath)
            print 'Saved'
    else:
        net.load_parameter(path=datapath)
        net.feed_forward(test_data)
Exemple #22
0
class Logic(object):
    """Truth tables for two-input logic gates as ([a, b], output) pairs."""
    logic_and = [[[0, 0], 0], [[0, 1], 0], [[1, 0], 0], [[1, 1], 1]]
    logic_or = [[[0, 0], 0], [[0, 1], 1], [[1, 0], 1], [[1, 1], 1]]
    logic_exor = [[[0, 0], 0], [[0, 1], 1], [[1, 0], 1], [[1, 1], 0]]


def vectorize(x):
    """Return label *x* (0 or 1) as a one-hot column vector of shape (2, 1)."""
    one_hot = np.zeros((2, 1))
    one_hot[x, 0] = 1
    return one_hot


if __name__ == '__main__':
    print 'network training'
    datapath = 'parameter/logic/or/'
    logic = Logic()
    data = logic.logic_or

    training_data = [(np.array(x), vectorize(y)) for x, y in data]
    test_data = [(np.array(x), y) for x, y in data]

    epochs = 300
    mini_batch_size = 1
    learning_rate = 0.5

    net = network.Neural_Network([2, 3, 2])
    net.train(training_data, epochs, mini_batch_size, learning_rate)
    net.save_parameter(datapath)
    net.feed_forward(test_data)