def __init__(self):
    # Objects
    self.rw = comClient()            # initialise Modbus comms class
    self.xls = xlsLogging(4)         # initialise Excel data logging
    self.pg = plotActiveGraph()      # initialise graphical plot
    self.NNctrl = neuralNetwork()    # initialise neural network
    if len(sys.argv) > 1 and int(sys.argv[1]) == 1:   # guard against a missing argument
        self.r = testModel("../../tests/")   # initialise simulated lab rig

    # Variables
    self.count = 0                   # 'heartbeat' counter
    self.sampleTime = 10
Example #2
def nn_test(model_address, piece_range_start=0, piece_range_over=10):
    data_x_test, y_vector_test = get_data(piece_range_start, piece_range_over)
    net = neuralNetwork.neuralNetwork()
    with open(model_address, 'rb') as f:
        print("[model] loading the model from {}".format(model_address))
        nn_para = pickle.load(f)
        net.nn_load(nn_para[0], nn_para[1], nn_para[2], nn_para[3], nn_para[4], nn_para[5])
        max_value = nn_para[6]
    y_test = []
    for x_test in data_x_test:
        # rescale each feature into (0.01, 1.0], matching the scaling used at training time
        for m in range(3):
            x_test[m] = x_test[m] / max_value[m] * 0.99 + 0.01
        y_test.append(net.query(x_test).tolist()[0][0])
    return y_test
def onActivatedRestart(self):
    print("start a new neuralNetwork")
    inputNodes = int(self.inputNodes.text())
    outputNodes = int(self.outputNodes.text())
    hiddenNodes = int(self.hiddenNodes.text())
    learningRate = float(self.learningRate.text())
    activationFunction = self.activateFunction.checkedId()
    randomMode = self.randomMode.checkedId()
    seedWih = int(self.WihSeed.text())
    seedWho = int(self.WhoSeed.text())
    noOfHiddenLayer = int(self.noOfHiddenLayer.text())
    self.nN = nNPackage.neuralNetwork(inputNodes, hiddenNodes, outputNodes,
                                      learningRate, activationFunction,
                                      randomMode, seedWih, seedWho,
                                      noOfHiddenLayer)
    self.nN.checkParameter()

    self.captureWidth = 640
    self.captureHeight = 120
def main():
    X, Y, m = get_data()
    m = X.shape[0]  # recompute the example count from X
    layer_sizes = [400, 25, 10]
    N = len(layer_sizes)
    print('Initializing Neural Network Parameters')
    layers = [
        L.layer(layer_sizes[i], layer_sizes[i + 1]) for i in range(0, N - 1)
    ]
    nn = NN.neuralNetwork(layers, regularization=1, batch_size=m)
    print(nn)
    print('Training Neural Network ')
    sys.stdout.flush()
    nn = NN.gradient_decent_adam_new(nn, X, Y)
    pred = nn.predict(X)
    array_correct = [1 if x == p else 0 for x, p in zip(pred, Y)]
    array_incorrect = [1 if x != p else 0 for x, p in zip(pred, Y)]
    print('Training Set Correct:   {}'.format(np.sum(array_correct)))
    print('Training Set Incorrect: {}'.format(np.sum(array_incorrect)))
    print('Training Set Accuracy:  {}'.format(np.mean(array_correct) * 100))
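
Since pred and Y align elementwise, the same three statistics can be computed directly in numpy; a minimal equivalent sketch (assumes pred and Y are equal-length 1-D sequences):

correct = np.sum(np.asarray(pred) == np.asarray(Y))
print('Training Set Correct:   {}'.format(correct))
print('Training Set Incorrect: {}'.format(len(Y) - correct))
print('Training Set Accuracy:  {}'.format(correct / len(Y) * 100))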
Example #5
def nn_train(piece_range_start=50, piece_range_over=100, input_nodes=3, hidden_nodes=7, output_nodes=1, learning_rate=0.03):
    start = time.time()
    data_x_train, y_vector_train = get_data(piece_range_start, piece_range_over)
    #print(data_x_train)
    max_value = [max(data_x_train[:, 0]), max(data_x_train[:, 1]), max(data_x_train[:, 2]), max(y_vector_train)]
    row, col = data_x_train.shape
    print("[train]data_x_train.shape={}".format(data_x_train.shape))
    print("[train]len(y_vector_train)={}".format(len(y_vector_train)))
    net = neuralNetwork.neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
    for i in range(row):
        for m in range(col):
            data_x_train[i, m] = data_x_train[i, m] / max_value[m] * 0.99 + 0.01
        y_vector_train[i] = y_vector_train[i]/max_value[3]*0.99+0.01
        net.train(data_x_train[i], y_vector_train[i])
    print("[train]train spent", (time.time() - start), "s")
    save_nn_data = net.nn_dump()
    save_nn_data.append(max_value)
    with open('./models/nn_model.pkl', 'wb') as f:
        print("[train]is saving the model as ./models/nn_model.pkl")
        pickle.dump(save_nn_data, f)
        print("[train]save successfully")
Example #6
    def constructNetwork(self, nLayers, nNodes, activation=tf.nn.sigmoid, \
                         wInit='normal', bInit='normal', stdDev=1.0, constantValue=0.1):

        self.nLayers = nLayers
        self.nNodes = nNodes
        self.activation = activation
        self.wInit = wInit
        self.bInit = bInit

        # print out...
        print()
        print("##### network parameters #####")
        print("Inputs: ", self.inputs)
        print("Outputs: ", self.outputs)
        print("Number of layers: ", nLayers)
        print("Number of nodes: ", nNodes)
        print("Activation function: ", activation.__name__)
        print("Weight initialization: ", wInit)
        print("Bias initialization: ", bInit)
        print("Setting up NN...")

        # input placeholders
        with tf.name_scope('input'):
            self.x = tf.placeholder('float', [None, self.inputs],
                                    name='x-input')
            self.y = tf.placeholder('float', [None, self.outputs],
                                    name='y-input')

        self.neuralNetwork = nn.neuralNetwork(nNodes,
                                              nLayers,
                                              activation,
                                              weightsInit=wInit,
                                              biasesInit=bInit,
                                              stdDev=stdDev,
                                              inputs=self.inputs,
                                              outputs=self.outputs,
                                              constantValue=constantValue)
        self.makeNetwork = lambda data: self.neuralNetwork.model(self.x)
def main(_):
    # Get the arguments from the command line
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--inputZip',
        action='store',
        dest='inputZip',
        help=
        'Input zip file which contains the datasets & the parameters for the classifier'
    )
    parser.add_argument(
        '--outputZip',
        action='store',
        dest='outputZip',
        help=
        'Output zip file which will contain the trained network and the classification results'
    )

    # parser.add_argument('-inputFile', action='store', dest='inputFile', help='Input file to classify', default = "")

    args = parser.parse_args()

    inputZip = args.inputZip
    outputZip = args.outputZip
    # inputFile = args.inputFile

    basedir = os.path.dirname(inputZip)
    nameDir = os.path.splitext(os.path.basename(inputZip))[0]
    networkDir = os.path.join(basedir, nameDir)

    outputBaseDir = os.path.dirname(outputZip)
    outputName = os.path.splitext(os.path.basename(outputZip))[0]
    outputPath = os.path.join(outputBaseDir, outputName)

    if os.path.isdir(networkDir):
        shutil.rmtree(networkDir)
    os.mkdir(networkDir)

    # Unpack archive
    with zipfile.ZipFile(inputZip) as zf:
        zf.extractall(networkDir)
        zf.extractall(basedir)

    jsonFile = os.path.join(networkDir, 'classifierInfo.json')
    saveModelPath = os.path.join(networkDir, 'CondylesClassifier')
    pickleToClassify = os.path.join(networkDir, 'toClassify.pickle')
    #
    # Create a network for the classification
    #
    if sys.version_info[0] == 3:
        with open(jsonFile, encoding='utf-8') as f:
            jsonDict = json.load(f)
    else:
        with open(jsonFile) as f:
            jsonDict = json.load(f)

    # In case our JSON file doesn't contain a valid classifier
    if 'CondylesClassifier' not in jsonDict:
        print("Error: Couldn't parameterize the network.")
        print("There is no 'CondylesClassifier' model.")
        return 0

    # If we have the Classifier, set all parameters for the network
    classifier = nn.neuralNetwork()

    # Essential parameters
    if 'NUM_CLASSES' in jsonDict['CondylesClassifier']:
        classifier.NUM_CLASSES = jsonDict['CondylesClassifier']['NUM_CLASSES']
    else:
        print("Missing NUM_CLASSES")

    if 'NUM_POINTS' in jsonDict['CondylesClassifier']:
        classifier.NUM_POINTS = jsonDict['CondylesClassifier']['NUM_POINTS']
    else:
        print("Missing NUM_POINTS")

    if 'NUM_FEATURES' in jsonDict['CondylesClassifier']:
        classifier.NUM_FEATURES = jsonDict['CondylesClassifier'][
            'NUM_FEATURES']
    else:
        print("Missing NUM_FEATURES")

    if sys.version_info[0] == 2:
        with open(pickleToClassify, "rb") as f:
            dictToClassify = pickle.load(f)
    else:
        with open(pickleToClassify, "rb") as f:
            dictToClassify = pickle.load(f, encoding='latin1')
    dictClassified = dict()

    for file in dictToClassify.keys():
        # print(file)
        # Create session, and import existing graph
        # print(shape)
        myData = get_input_shape(dictToClassify[file], classifier)
        session = tf.InteractiveSession()

        new_saver = tf.train.import_meta_graph(saveModelPath + '.meta')
        new_saver.restore(session, saveModelPath)
        graph = tf.Graph().as_default()

        # Get useful tensor in the graph
        tf_data = session.graph.get_tensor_by_name("Inputs_management/input:0")
        data_pred = session.graph.get_tensor_by_name("Predictions/output:0")

        feed_dict = {tf_data: myData}
        data_pred = session.run(data_pred, feed_dict=feed_dict)

        result = get_result(data_pred)
        dictClassified[file] = int(result)

    # Save into a JSON file
    with open(os.path.join(networkDir, 'results.json'), 'w') as f:
        json.dump(dictClassified, f, ensure_ascii=False, indent=4)

    # Zip all those files together
    zipPath = networkDir
    exportModelNetwork(zipPath, outputPath)

    return True
Example #8
import numpy, matplotlib.pyplot as plt
from neuralNetwork import neuralNetwork

# nodes and learning rate
input_layer = 784
hidden_layer = 100 # Optimal is 200, but it takes considerably longer to train and the % difference is about 1%
output_layer = 10
learning_rate = 0.1

# Instance of neural network
nn = neuralNetwork(input_layer, hidden_layer, output_layer, learning_rate)

# Loading training dataset - this code is for the small sample
# training_data_file = open("mnist_dataset/mnist_train_100.csv", 'r')
# training_data_list = training_data_file.readlines()
# training_data_file.close()

# Loading training dataset - this code is for the big sample. You need to unzip the full data sets
training_data_file = open("mnist_dataset/mnist_full_dataset/mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()

# TRAINING - "epochs" is the standard term for repeated passes over the training data

#   epochs = 5 means we perform five training passes over the same training
#   list, so the training time scales by a factor of five.

epochs = 5

for epoch in range(epochs):
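    # The loop body is missing from the source snippet; what follows is a
    # minimal sketch of the conventional MNIST pass, assuming the train()
    # interface used in Examples #2 and #5 (inputs scaled into (0.01, 1.0],
    # one-hot targets in {0.01, 0.99}):
    for record in training_data_list:
        all_values = record.split(',')
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        targets = numpy.zeros(output_layer) + 0.01
        targets[int(all_values[0])] = 0.99
        nn.train(inputs, targets)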

def __init__(self):
    # initialize the controller: create the loader, examine the data, and create the network
    self.ld = loader.loader('../data/mnist_train_100.csv')
    self.ex = examiner.examiner(self.ld)
    self.nw = neuralNetwork.neuralNetwork(784, 100, 10, 0.3)
Example #10
def main(_):
    print("\nTensorFlow current version : " + str(tf.__version__) + "\n")

    # Get the arguments from the command line
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--inputZip',
        action='store',
        dest='inputZip',
        help=
        'Input zip file which contains the datasets & the parameters for the classifier',
        default="")
    parser.add_argument(
        '--outputZip',
        action='store',
        dest='outputZip',
        help='Output zip file which will contain the trained neural network',
        default="")

    args = parser.parse_args()

    inputZip = args.inputZip
    outputZip = args.outputZip
    basedir = os.path.dirname(inputZip)
    nameDir = os.path.splitext(os.path.basename(inputZip))[0]
    outputdir = os.path.dirname(outputZip)
    nameOutput = os.path.splitext(os.path.basename(outputZip))[0]

    outputPath = os.path.join(outputdir, nameOutput)
    networkDir = os.path.join(basedir, nameDir)
    print("networkDir : " + networkDir)

    if os.path.isdir(networkDir):
        shutil.rmtree(networkDir)
    os.mkdir(networkDir)

    # Unpack archive
    with zipfile.ZipFile(inputZip) as zf:
        # zf.extractall(networkDir)
        zf.extractall(basedir)

    jsonFile = os.path.join(networkDir, 'classifierInfo.json')
    saveModelPath = os.path.join(networkDir, 'CondylesClassifier')
    pickle_file = os.path.join(networkDir, 'datasets.pickle')

    #
    # Create a network for the classification
    #
    if sys.version_info[0] == 3:
        with open(jsonFile, encoding='utf-8') as f:
            jsonDict = json.load(f)
    else:
        with open(jsonFile) as f:
            jsonDict = json.load(f)

    # In case our JSON file doesn't contain a valid classifier
    if 'CondylesClassifier' not in jsonDict:
        print("Error: Couldn't parameterize the network.")
        print("There is no 'CondylesClassifier' model.")
        return 0

    # If we have the Classifier, set all parameters for the network
    classifier = nn.neuralNetwork()

    # Essential parameters
    if 'NUM_CLASSES' in jsonDict['CondylesClassifier']:
        classifier.NUM_CLASSES = jsonDict['CondylesClassifier']['NUM_CLASSES']
    else:
        print("Missing NUM_CLASSES")
        accuracy = -1

    if 'NUM_POINTS' in jsonDict['CondylesClassifier']:
        classifier.NUM_POINTS = jsonDict['CondylesClassifier']['NUM_POINTS']
    else:
        print("Missing NUM_POINTS")
        accuracy = -1

    if 'NUM_FEATURES' in jsonDict['CondylesClassifier']:
        classifier.NUM_FEATURES = jsonDict['CondylesClassifier'][
            'NUM_FEATURES']
    else:
        print("Missing NUM_FEATURES")
        accuracy = -1

    # TODO: Manage case with incomplete parameterization of the classifier network

    # Specific parameters
    if 'learning_rate' in jsonDict['CondylesClassifier']:
        classifier.learning_rate = jsonDict['CondylesClassifier'][
            'learning_rate']
    else:
        classifier.learning_rate = 0.0005

    if 'lambda_reg' in jsonDict['CondylesClassifier']:
        classifier.lambda_reg = jsonDict['CondylesClassifier']['lambda_reg']
    else:
        classifier.lambda_reg = 0.01

    if 'num_epochs' in jsonDict['CondylesClassifier']:
        classifier.num_epochs = jsonDict['CondylesClassifier']['num_epochs']
    else:
        classifier.num_epochs = 2

    if 'num_steps' in jsonDict['CondylesClassifier']:
        classifier.num_steps = jsonDict['CondylesClassifier']['num_steps']
    else:
        classifier.num_steps = 11

    if 'batch_size' in jsonDict['CondylesClassifier']:
        classifier.batch_size = jsonDict['CondylesClassifier']['batch_size']
    else:
        classifier.batch_size = 10

    if 'NUM_HIDDEN_LAYERS' in jsonDict['CondylesClassifier']:
        classifier.NUM_HIDDEN_LAYERS = jsonDict['CondylesClassifier'][
            'NUM_HIDDEN_LAYERS']
        if classifier.NUM_HIDDEN_LAYERS:
            classifier.nb_hidden_nodes_1 = jsonDict['CondylesClassifier'][
                'nb_hidden_nodes_1']
        if classifier.NUM_HIDDEN_LAYERS > 1:
            classifier.nb_hidden_nodes_2 = jsonDict['CondylesClassifier'][
                'nb_hidden_nodes_2']
        # if classifier.NUM_HIDDEN_LAYERS > 2:
        #     classifier.nb_hidden_nodes_1 = jsonDict['CondylesClassifier']['nb_hidden_nodes_3']
    else:
        classifier.NUM_HIDDEN_LAYERS = 1
        classifier.nb_hidden_nodes_1 = (
            classifier.NUM_POINTS * classifier.NUM_FEATURES +
            classifier.NUM_CLASSES) // 2

    train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_inputs(
        pickle_file, classifier)

    accuracy = run_training(train_dataset, train_labels, valid_dataset,
                            valid_labels, test_dataset, test_labels,
                            saveModelPath, classifier)
    jsonDict['CondylesClassifier']['accuracy'] = accuracy
    with open(os.path.join(networkDir, 'classifierInfo.json'), 'w') as f:
        json.dump(jsonDict, f, ensure_ascii=False, indent=4)

    # Zip all those files together
    zipPath = networkDir
    exportModelNetwork(zipPath, outputPath)

    return
Example #11
y = np.array(dfobj['label'])

# number of labels to classify
num_labels = len(set(y))

# convert y to a one-hot matrix (labels are 1-indexed)
num_examp = np.size(X, 0)
tmp = np.zeros([num_labels, num_examp])
p = list(y)
for i in range(num_examp):
    tmp[p[i] - 1][i] = 1

y = tmp
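
# Vectorized equivalent of the loop above (a sketch; assumes 1-indexed integer labels):
#   tmp[np.asarray(p) - 1, np.arange(num_examp)] = 1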

# X, y, num_labels, num_hidden_unit, _lambda, step
neuralNetwork = NN.neuralNetwork(X, y, num_labels, 500, 10, 100)

# arguments: learning_rate, then motion_factor (momentum) with 0 <= motion_factor < 1
neuralNetwork.train(0.8, 0)

# read the predict files and examine the correctness rate
#dfobj = pd.read_csv("D:/Data Science Experiment/Kaggle/Kaggle Digit Recognizer/small predict.csv")
matrix = np.array(dfobj)
ask = matrix[:, 1:]
ans = matrix[:, 0]

pred, res = neuralNetwork.predict(ask)
print(pred)
neuralNetwork.show_cost_values()
# para1 = neuralNetwork.theta1
Example #12
        "--model",
        help="Save model in different file",
        action="store_true",
    )
    parser.add_argument(
        "--split",
        metavar="[1-99]",
        help="Choose size of split",
        choices=(range(1, 100)),
        type=split_size,
        default=80,
    )

    args = parser.parse_args()
    get_seed(args.seed)
    input_n = 13
    output_n = 2
    hidden_layers = args.hidden_layer

    n = neuralNetwork(input_n, output_n, hidden_layers, args.learningrate,
                      sigmoid, args.bias)
    fit(args, n)
    print()
    if args.visu:
        fig1, ax1 = display(n.loss, n.val_loss, "Loss Trend", "loss",
                            "val_loss")
        fig2, ax2 = display(n.acc, n.val_acc, "Accuracy Trend", "acc",
                            "val_acc")
        plt.show()
    save_model(args.model, n)
Example #13
def main(_):

    # Get the arguments from the command line
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-inputZip',
        action='store',
        dest='inputZip',
        help=
        'Input zip file which contains the datasets & the parameters for the classifier',
        default="")

    # parser.add_argument('-inputFile', action='store', dest='inputFile', help='Input file to classify', default = "")

    args = parser.parse_args()

    inputZip = args.inputZip
    # inputFile = args.inputFile

    basedir = os.path.dirname(inputZip)
    nameDir = os.path.splitext(os.path.basename(inputZip))[0]

    networkDir = os.path.join(basedir, nameDir)
    print "networkDir : " + networkDir

    if os.path.isdir(networkDir):
        shutil.rmtree(networkDir)
    os.mkdir(networkDir)

    # Unpack archive
    with zipfile.ZipFile(inputZip) as zf:
        zf.extractall(basedir)

    jsonFile = os.path.join(networkDir, 'classifierInfo.json')
    saveModelPath = os.path.join(networkDir, 'CondylesClassifier')
    pickleToClassify = os.path.join(networkDir, 'toClassify.pickle')
    #
    # Create a network for the classification
    #
    with open(jsonFile) as f:
        jsonDict = json.load(f)

    # In case our JSON file doesn't contain a valid classifier
    if 'CondylesClassifier' not in jsonDict:
        print("Error: Couldn't parameterize the network.")
        print("There is no 'CondylesClassifier' model.")
        return 0

    # If we have the Classifier, set all parameters for the network
    classifier = nn.neuralNetwork()

    # Essential parameters
    if 'NUM_CLASSES' in jsonDict['CondylesClassifier']:
        classifier.NUM_CLASSES = jsonDict['CondylesClassifier']['NUM_CLASSES']
    else:
        print("Missing NUM_CLASSES")

    if 'NUM_POINTS' in jsonDict['CondylesClassifier']:
        classifier.NUM_POINTS = jsonDict['CondylesClassifier']['NUM_POINTS']
    else:
        print("Missing NUM_POINTS")

    if 'NUM_FEATURES' in jsonDict['CondylesClassifier']:
        classifier.NUM_FEATURES = jsonDict['CondylesClassifier'][
            'NUM_FEATURES']
    else:
        print("Missing NUM_FEATURES")

    dictToClassify = pickle.load(open(pickleToClassify, "rb"))

    print(dictToClassify)

    print(" .......... \n")
    for file in dictToClassify.keys():
        print(file)

        print("\n\n DONE \n\n")

        # Create session, and import existing graph
        # print(shape)
        myData = get_input_shape(dictToClassify[file], classifier)
        session = tf.InteractiveSession()

        new_saver = tf.train.import_meta_graph(saveModelPath + '.meta')
        new_saver.restore(session, saveModelPath)
        graph = tf.Graph().as_default()

        # Get useful tensor in the graph
        tf_data = session.graph.get_tensor_by_name("Inputs_management/input:0")
        data_pred = session.graph.get_tensor_by_name("Predictions/output:0")

        feed_dict = {tf_data: myData}
        data_pred = session.run(data_pred, feed_dict=feed_dict)

        result = get_result(data_pred)
        print "Shape : " + os.path.basename(file)
        print "Group predicted :" + str(result) + "\n"

    return result
Example #14
# evaluation data (10% of the whole set)
Xte = myData.X[dtrNum:]
Yte = myData.Y[dtrNum:]
#-------------------

#-------------------
# 3. standardize the input data
xMean = np.mean(Xtr, axis=0)
xStd = np.std(Xtr, axis=0)
Xtr = (Xtr - xMean) / xStd
Xte = (Xte - xMean) / xStd
#-------------------

#-------------------
# 4. train and evaluate the neural network
myModel = nn.neuralNetwork(Xtr, Ytr, hDim=hDim, activeType=activeType)

trLoss = []
teLoss = []
trAcc = []
teAcc = []

for ite in range(1001):
    # set up the training data
    Xbatch = Xtr
    Ybatch = Ytr

    # record the loss and accuracy
    trLoss.append(myModel.CE(Xtr, Ytr))
    teLoss.append(myModel.CE(Xte, Yte))
    trAcc.append(myModel.accuracy(Xtr, Ytr))
Example #15
vlabels = np.transpose(mat_contents['ValidationLabels'])
vlabels = vlabels.astype('double')

# const prms
features = train.shape[0]
obs = train.shape[1]
numlabels = np.ptp(labels)+1

# set labels to -0.95 / 0.95 (rather than -1 / 1); the softer targets give a better gradient
trainLabels = -0.95 * np.ones((numlabels, obs))
for i in range(obs):
    trainLabels[labels[0][i]][i] = 0.95
trainFeatures = train

# train it
ann = nn.neuralNetwork(trainFeatures, trainLabels, 25)
ann.normalizeInputs()
ann.initializeWeights()
ann.train(1000)

# test it on the training set
# accuracy on the data it was trained off of
y = ann.test(trainFeatures)
trainGuess = np.argmax(y, axis=0)
AccTrain = np.sum(trainGuess == labels).astype('double') / obs * 100
print(AccTrain)

# independent test data
y = ann.test(test)
testGuess = np.argmax(y, axis=0)
AccTest = np.sum(testGuess == tlabels).astype('double') / test.shape[1] * 100
Example #16
def main():
    global SCREEN, FPSCLOCK
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    SCREEN = pygame.display.set_mode((int(SCREENWIDTH), int(SCREENHEIGHT)))
    pygame.display.set_caption('Flappy Bird')

    # numbers sprites for score display
    IMAGES['numbers'] = tuple(
        pygame.image.load('assets/sprites/%d.png' % i).convert_alpha()
        for i in range(10)
    )

    # base (ground) sprite
    IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()

    
    global models
    models = []
    for i in range(total_models):
        n = nn.neuralNetwork(5, 11, 1)
        models.append(Player(n))

    while True:
        # select random background sprites
        randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
        IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()

        # select random player sprites
        randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)
        IMAGES['player'] = (
            pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
            pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
            pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
        )

        # select random pipe sprites
        pipeindex = random.randint(0, len(PIPES_LIST) - 1)
        IMAGES['pipe'] = (
            pygame.transform.rotate(
                pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),
            pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
        )

        # hitmask for pipes
        HITMASKS['pipe'] = (
            getHitmask(IMAGES['pipe'][0]),
            getHitmask(IMAGES['pipe'][1]),
        )

        # hitmask for player
        HITMASKS['player'] = (
            getHitmask(IMAGES['player'][0]),
            getHitmask(IMAGES['player'][1]),
            getHitmask(IMAGES['player'][2]),
        )

        movementInfo = showWelcomeAnimation()
        mainGame(movementInfo)
        showGameOverScreen()
def test10Fold():
    global allWords
    splits = tenFoldCrossValidation()

    # The five classifiers share an identical fold loop, so the repeated
    # blocks are collapsed into one parameterized pass over (name, factory).
    classifiers = [
        ("Naive Bayes", lambda: naiveBayes()),
        ("Random Forest", lambda: RandomForest(100)),
        ("Neural 5", lambda: neuralNetwork((5, ), 1000)),
        ("Neural 3", lambda: neuralNetwork((3, ), 1000)),
        ("SVM", lambda: svm()),
    ]

    for name, makeClassifier in classifiers:
        total = 0
        print(name)
        for count, split in enumerate(splits):
            model = makeClassifier()
            trainFeatures = [example.features for example in split.train]
            trainClasses = [example.klass for example in split.train]
            testFeatures = [example.features for example in split.test]
            testClasses = [example.klass for example in split.test]

            model.train(trainFeatures, trainClasses)
            model.test(testFeatures, testClasses)
            accuracy = model.getCorrectCount() / len(testClasses)
            total = total + accuracy
            print("[INFO]\tFold ", str(count), " Accuracy:", str(accuracy))

        print("[INFO]\tAccuracy:", str(total / 10))
Example #18
        self.imageLabelNomb = imageLabelNomb


imageLabelNomb = leerDatosTxt(ruta=classMatDetec.rpe)

shuffle(imageLabelNomb)  # shuffle the list that contains the labels
sn = soloNombres(imageLabelNomb)
# =========================================================================================== #

# Load the model if it exists; otherwise create it from scratch #
# ================================================== #
if os.path.exists(classMatDetec.h5):

    #model = tf.keras.models.load_model(classMatDetec.h5, custom_objects={'loss_function': loss_function})

    model, h_out = neuralNetwork()
    model.compile(loss=lossFunction,
                  optimizer=tf.keras.optimizers.Adam(lr=0.001))
    #model.compile(loss=loss_function,optimizer=tf.keras.optimizers.RMSprop(lr=0.001,rho=0.9,epsilon=None,decay=0.0))
    model.load_weights(classMatDetec.h5)

else:

    model, h_out = neuralNetwork()
    #model.compile(loss=loss_function,optimizer=tf.keras.optimizers.Adam(lr = 0.001))
    model.compile(loss=lossFunction,
                  optimizer=tf.keras.optimizers.RMSprop(lr=0.001,
                                                        rho=0.9,
                                                        epsilon=None,
                                                        decay=0.0))
Example #19
def setupModel(allData):
    lr = 0.15
    inputNodes = len(allData[0][1])
    hiddenNodes = 100
    outputNodes = amountOfLabels(allData)
    return neuralNetwork(inputNodes, hiddenNodes, outputNodes, lr)
Example #20
def main(_):
    print "\nTensorFlow current version : " + str(tf.__version__) + "\n"

    # Get the arguments from the command line
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-inputZip',
        action='store',
        dest='inputZip',
        help=
        'Input zip file which contains the datasets & the parameters for the classifier',
        default="")

    # parser.add_argument('-saveModelPath', action='store', dest='saveModelPath', help='Path to the saved model to use', default='weights_5Groups')

    # parser.add_argument('-jsonFile', action='store', dest='jsonFile', help='JSON file which contains all the parameters to use the network', default='')

    args = parser.parse_args()
    # pickle_file = args.pickle_file
    # saveModelPath = args.saveModelPath
    # jsonFile = args.jsonFile
    inputZip = args.inputZip
    basedir = os.path.dirname(inputZip)
    nameDir = os.path.splitext(os.path.basename(inputZip))[0]

    networkDir = os.path.join(basedir, nameDir)
    print "networkDir : " + networkDir

    if os.path.isdir(networkDir):
        shutil.rmtree(networkDir)
    os.mkdir(networkDir)

    # Unpack archive
    with zipfile.ZipFile(inputZip) as zf:
        zf.extractall(basedir)

    jsonFile = os.path.join(networkDir, 'classifierInfo.json')
    saveModelPath = os.path.join(networkDir, 'CondylesClassifier')
    pickle_file = os.path.join(networkDir, 'datasets.pickle')
    #
    # Create a network for the classification
    #
    with open(jsonFile) as f:
        jsonDict = json.load(f)

    # In case our JSON file doesn't contain a valid classifier
    if 'CondylesClassifier' not in jsonDict:
        print("Error: Couldn't parameterize the network.")
        print("There is no 'CondylesClassifier' model.")
        return 0

    # If we have the Classifier, set all parameters for the network
    classifier = nn.neuralNetwork()

    # Essential parameters
    if 'NUM_CLASSES' in jsonDict['CondylesClassifier']:
        classifier.NUM_CLASSES = jsonDict['CondylesClassifier']['NUM_CLASSES']
    else:
        print("Missing NUM_CLASSES")
        accuracy = -1

    if 'NUM_POINTS' in jsonDict['CondylesClassifier']:
        classifier.NUM_POINTS = jsonDict['CondylesClassifier']['NUM_POINTS']
    else:
        print("Missing NUM_POINTS")
        accuracy = -1

    if 'NUM_FEATURES' in jsonDict['CondylesClassifier']:
        classifier.NUM_FEATURES = jsonDict['CondylesClassifier'][
            'NUM_FEATURES']
    else:
        print("Missing NUM_FEATURES")
        accuracy = -1

    # TODO: Manage case with incomplete parameterization of the classifier network

    # Specific parameters
    if 'learning_rate' in jsonDict['CondylesClassifier']:
        classifier.learning_rate = jsonDict['CondylesClassifier'][
            'learning_rate']
    else:
        classifier.learning_rate = 0.0005

    if 'lambda_reg' in jsonDict['CondylesClassifier']:
        classifier.lambda_reg = jsonDict['CondylesClassifier']['lambda_reg']
    else:
        classifier.lambda_reg = 0.01

    if 'num_epochs' in jsonDict['CondylesClassifier']:
        classifier.num_epochs = jsonDict['CondylesClassifier']['num_epochs']
    else:
        classifier.num_epochs = 2

    if 'num_steps' in jsonDict['CondylesClassifier']:
        classifier.num_steps = jsonDict['CondylesClassifier']['num_steps']
    else:
        classifier.num_steps = 11

    if 'batch_size' in jsonDict['CondylesClassifier']:
        classifier.batch_size = jsonDict['CondylesClassifier']['batch_size']
    else:
        classifier.batch_size = 10

    if 'NUM_HIDDEN_LAYERS' in jsonDict['CondylesClassifier']:
        classifier.NUM_HIDDEN_LAYERS = jsonDict['CondylesClassifier'][
            'NUM_HIDDEN_LAYERS']
    else:
        classifier.NUM_HIDDEN_LAYERS = 2

    train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_inputs(
        pickle_file, classifier)

    accuracy = run_training(train_dataset, train_labels, valid_dataset,
                            valid_labels, test_dataset, test_labels,
                            saveModelPath, classifier)
    jsonDict['CondylesClassifier']['accuracy'] = accuracy

    # Zip all those files together
    zipPath = networkDir
    exportModelNetwork(zipPath)

    return
Example #21
from neuralNetwork import neuralNetwork

input_nodes = 3
hidden_nodes = 3
output_nodes = 3

learning_rate = 0.3

n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

print(n.query([1.0, 0.5, -1.5]))
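
Most snippets in this collection call the same small interface: a constructor taking (input_nodes, hidden_nodes, output_nodes, learning_rate) plus train() and query(). Below is a minimal sketch of a compatible single-hidden-layer sigmoid network, for orientation only; it is an assumed implementation, not the exact class these examples import.

import numpy
import scipy.special

class neuralNetwork:
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.lr = learningrate
        # weights drawn from N(0, n^-0.5) for each layer pair
        self.wih = numpy.random.normal(0.0, self.hnodes ** -0.5,
                                       (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, self.onodes ** -0.5,
                                       (self.onodes, self.hnodes))
        self.activation = scipy.special.expit  # sigmoid

    def query(self, inputs_list):
        # forward pass: input -> hidden -> output
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden = self.activation(numpy.dot(self.wih, inputs))
        return self.activation(numpy.dot(self.who, hidden))

    def train(self, inputs_list, targets_list):
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        hidden = self.activation(numpy.dot(self.wih, inputs))
        outputs = self.activation(numpy.dot(self.who, hidden))
        # backpropagate the error and take one gradient-descent step
        output_errors = targets - outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        self.who += self.lr * numpy.dot(
            output_errors * outputs * (1.0 - outputs), hidden.T)
        self.wih += self.lr * numpy.dot(
            hidden_errors * hidden * (1.0 - hidden), inputs.T)
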
def classifierCombination(trainData, trainLabels, testData):
    count = 2
    models = [None] * count
    '''
	ratio = i/(count-1.0)
		ratio = 0.10+ratio*(0.20-0.10)
		weights = {0:ratio, 1:(1-ratio)}
	'''
    weights = {0: 1, 1: 10}
    samples = testData.shape[0]
    sums = np.zeros(samples)
    for i in range(count):

        if (i == 0):
            model = LinearSVC(penalty='l2',
                              loss='squared_hinge',
                              dual=True,
                              tol=0.0001,
                              C=1.0,
                              fit_intercept=True,
                              intercept_scaling=1,
                              class_weight=weights,
                              verbose=True,
                              random_state=None,
                              max_iter=2000)
            # penalty - l1, l2; loss - squared_hinge (better) or hinge
        elif (i == 1):
            model = linear_model.LogisticRegression(solver='lbfgs',
                                                    verbose=True,
                                                    max_iter=5000,
                                                    class_weight=weights)
        elif (i == 2):
            '''
			model = MLPClassifier(activation='tanh', alpha=1e-05, batch_size=4000, beta_1=0.9, beta_2=0.999, early_stopping=False,epsilon=1e-08, 
				hidden_layer_sizes=100, learning_rate='constant', learning_rate_init=0.001, max_iter=1000, momentum=0.9, 
				nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True, solver='lbfgs', tol=0.0001, validation_fraction=0.1, 
				verbose=True, warm_start=False)
			'''
            labels = neuralNetwork(trainData, trainLabels, testData, 1)

        model.fit(trainData, trainLabels)
        models[i] = model
        print('models trained')

        if (i == 0):
            labels = models[i].decision_function(testData)
        elif (i == 1):
            temp = models[i].predict_proba(testData)
            labels = np.zeros(temp.shape[0])
            for j in range(temp.shape[0]):
                labels[j] = temp[j][1] - temp[j][0]
        #print(labels[0:10])
        sums = [x + y for x, y in zip(sums, labels)]

    combinedLabels = np.zeros(samples)
    print('--------------------------------------------------')
    for i in range(samples):
        #print('sum - ',sums[i],' ',i),
        if sums[i] > 0:
            combinedLabels[i] = 1
        else:
            combinedLabels[i] = 0
    return combinedLabels
Example #23
#! /usr/bin/python
import numpy as np
import neuralNetwork as nn
import matplotlib.pyplot as plt

plt.close()

filename = 'iris.txt'
features = 4
observations = len(open(filename).readlines())
classes = 3

x = np.transpose(np.loadtxt(filename, delimiter=',', usecols=(0, 1, 2, 3)))
y = np.transpose(np.loadtxt(filename, delimiter=',', usecols=(4, 5, 6)))

a = nn.neuralNetwork(x, y, 10)
a.normalizeInputs()
a.initializeWeights()
a.train(10000)

b = a.test(x, y)
plt.plot(b[0])
plt.plot(b[1])
plt.plot(b[2])
plt.show()
def classifier():

    #X_label, X_train, Y_test = make_data()
    if not os.path.exists('models'):
        os.mkdir('models')
    data, labels = preprocess()
    fold = 3
    count = 0
    choice = 6
    # 0 - knn classifier, 1 - autoencoder, 2 - neural network,
    # 3 - Bernoulli restricted Boltzmann machine, 4 - lgbm (and other commented-out classifiers),
    # 5 - mutual-information feature selection, 6 - neural network evaluated by AUC
    fileName = 'test_log_mano_50'
    results = [None] * fold
    for train_index, test_index in split(data, labels, fold):
        count += 1
        trainData, testData = data[train_index], data[test_index]
        trainLabels, testLabels = labels[train_index], labels[test_index]

        print('splitting done')
        print('Number of 1 - ', sum(testLabels), ' 0 - ',
              (len(testLabels) - sum(testLabels)))

        if (choice == 0):
            knnLabels = createKnnModel(trainData, trainLabels, testData)
            print('knn trained')
            auc = consfusion_eval(testLabels, knnLabels)

        elif (choice == 1):
            autoencoder(trainData, trainLabels, testData, fileName, count)
            maxAuc = maxThreshold = 0
            for i in range(10, 80):
                thresh = i / 1000
                autoencoderLabels = evaluate(
                    ('models/' + fileName + '_%i') % count, thresh)
                print('Thresh - ', thresh),
                temp = consfusion_eval(testLabels, autoencoderLabels)
                if (temp > maxAuc):
                    maxAuc = temp
                    maxThreshold = thresh
            print('Max auc - ', maxAuc, ' threshold - ', maxThreshold)
            auc = maxAuc

        elif (choice == 2):
            nnLabels = neuralNetwork(trainData, trainLabels, testData, count)
            #nnLabels = neuralNetworkAuc(trainData, trainLabels, testData, count)
            auc = consfusion_eval(testLabels, nnLabels)

        elif (choice == 3):
            boltzLabels = restrictedBoltzmannMachine(trainData, trainLabels,
                                                     testData)
            auc = consfusion_eval(testLabels, boltzLabels)

        elif (choice == 4):
            #clabels = randomForest(trainData, trainLabels, testData)
            #clabels = logisticRegression(trainData, trainLabels, testData)
            #clabels = svmClassifier(trainData, trainLabels, testData)
            #clabels = adaBoostClassifier(trainData, trainLabels, testData)
            #clabels = classifierCombination(trainData, trainLabels, testData)
            #clabels = mlpClassifier(trainData, trainLabels, testData)
            clabels = lgbmclassifier(trainData, trainLabels, testData)
            auc = consfusion_eval(testLabels, clabels)

        elif (choice == 5):
            featureSelectionMI(trainData, trainLabels, testData, testLabels)
        elif (choice == 6):
            nlabels = neuralNetworkAuc(trainData, trainLabels, testData, count)
            auc = area_under_curve(testLabels, nlabels)

        results[count - 1] = auc
        print('fold ', count, ' AUC - ', auc)
    print('Mean auc - ', np.mean(results))