Example #1
def main():

    ap = argparse.ArgumentParser()
    ap.add_argument("-s", "--save-model", type=int, default=-1, help="(optional) whether or not model should be saved to disk")
    ap.add_argument("-l", "--load-model", type=int, default=-1, help="(optional) whether or not pre-trained model should be loaded")
    ap.add_argument("-w", "--weights", type=str, help="(optional) path to weights file")
    args = vars(ap.parse_args())

    data = load_dataset()
    dataset = shape_data(data)

    n_epochs = 4000
    opt = SGD(lr=0.01)
    cross_validation_exp = 10

    for i in range(0, cross_validation_exp):

        print("Running Experiment:", i)

        trainData, testData, trainLabels, testLabels = train_test_split(dataset / 255.0, data.target.astype("int"), test_size=0.10)

        #tbCallBack = keras.callbacks.TensorBoard(log_dir='/home/matthia/Desktop/ogs', histogram_freq=0, write_graph=True, write_images=False)

        trainLabels = make_categorical(trainLabels, 10)
        testLabels = make_categorical(testLabels, 10)

        print("[INFO] compiling model...")

        model_MatFra = LeNet.build(width=8, height=8, depth=1, classes=10, mode=1, weightsPath=args["weights"] if args["load_model"] > 0 else None)
        model_MatFra.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

        history_MatFra = model_MatFra.fit(trainData, trainLabels, batch_size=50, nb_epoch=n_epochs, verbose=1, validation_data=(testData, testLabels))#,callbacks=[tbCallBack])

        #print("[INFO] evaluating...")
        #(loss, accuracy) = model_MatFra.evaluate(testData, testLabels, batch_size=128, verbose=1)
        #print("[INFO] accuracy: {:.2f}%".format(accuracy * 100))
        MatFra_plots(history_MatFra, i)

        model_Google = LeNet.build(width=8, height=8, depth=1, classes=10, mode=2, weightsPath=args["weights"] if args["load_model"] > 0 else None)
        model_Google.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

        history_Google = model_Google.fit(trainData, trainLabels, batch_size=50, nb_epoch=n_epochs, verbose=1, validation_data=(testData, testLabels))#,callbacks=[tbCallBack])
        
        Google_plots(history_Google, i)
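
Example #1 above leans on helpers defined elsewhere in the project (`load_dataset`, `shape_data`, `make_categorical`). A minimal sketch of what they might look like, assuming the dataset is scikit-learn's 8x8 digits and that `make_categorical` simply wraps Keras's one-hot utility (the names and shapes here are assumptions, not the original code):

import numpy as np
from keras.utils import np_utils
from sklearn.datasets import load_digits

def load_dataset():
    # assumed: the 8x8 digit images bundled with scikit-learn
    return load_digits()

def shape_data(data):
    # reshape the flat 64-pixel rows into (samples, 8, 8, 1) tensors,
    # matching LeNet.build(width=8, height=8, depth=1)
    return data.data.reshape((data.data.shape[0], 8, 8, 1))

def make_categorical(labels, num_classes):
    # one-hot encode integer labels into vectors of length num_classes
    return np_utils.to_categorical(labels, num_classes)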
Example #2
def main():

    ap = argparse.ArgumentParser()
    ap.add_argument("-s", "--save-model", type=int, default=-1, help="(optional) whether or not model should be saved to disk")
    ap.add_argument("-l", "--load-model", type=int, default=-1, help="(optional) whether or not pre-trained model should be loaded")
    ap.add_argument("-w", "--weights", type=str, help="(optional) path to weights file")
    args = vars(ap.parse_args())

    X = load_Positions()
    X = shape_data(X)
    y = load_Labels()

    n_epochs = 2000
    opt = SGD(lr=0.01)
    cross_validation_exp = 1

    trainData, testData, trainLabels, testLabels = train_test_split(X, y, test_size=0.1, random_state=42)

    print('Deleting old logs in 3 sec...')
    time.sleep(3)
    for item in os.listdir('/home/borg/Desktop/logs'):
        os.remove(os.path.join('/home/borg/Desktop/logs', item))

    for i in range(0, cross_validation_exp):

        print("Running Experiment:", i)

        #trainData = load_Train_data()
        #trainLabels = load_Train_labels()
        #testData = load_Validation_data()
        #testLabels = load_Validation_labels()

        tbCallBack = keras.callbacks.TensorBoard(log_dir='/home/borg/Desktop/logs', histogram_freq=0, write_graph=True, write_images=False)

        print("[INFO] compiling model...")

        model_MatFra = LeNet.build(width=8, height=8, depth=1, classes=3, mode=1, weightsPath=args["weights"] if args["load_model"] > 0 else None)
        model_MatFra.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

        history_MatFra = model_MatFra.fit(trainData, trainLabels, batch_size=128, nb_epoch=n_epochs, verbose=1, validation_data=(testData, testLabels), callbacks=[tbCallBack])

        print("[INFO] evaluating...")
        (loss, accuracy) = model_MatFra.evaluate(testData, testLabels, batch_size=128, verbose=1)
        print("[INFO] accuracy: {:.2f}%".format(accuracy * 100))
        MatFra_plots(history_MatFra, i)
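
Example #2 never one-hot encodes its labels before fitting with `categorical_crossentropy`, so the loaders presumably return them already encoded. A hedged sketch of what `load_Labels` might look like under that assumption (the .npy file name is hypothetical):

import numpy as np
from keras.utils import np_utils

def load_Labels():
    # hypothetical: integer class labels stored on disk, one-hot
    # encoded to the 3 classes this snippet trains on
    y = np.load('labels.npy')
    return np_utils.to_categorical(y, 3)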
Example #3
# print "trainLabels", trainLabels
# print "testLabels", testLabels

# transform the training and testing labels into one-hot vectors --
# this generates a vector for each label, where the index of the
# label is set to `1` and all other entries to `0`; in the case of
# our problem, there are 98 class labels
trainLabels = np_utils.to_categorical(trainLabels, 98)
testLabels = np_utils.to_categorical(testLabels, 98)

# initialize the optimizer and model
print("[INFO] compiling model...")
opt = SGD(lr=0.01)
model = LeNet.build(
    width=13,
    height=33,
    depth=1,
    classes=98,
    weightsPath=args["weights"] if args["load_model"] > 0 else None)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# only train and evaluate the model if we *are not* loading a
# pre-existing model
if args["load_model"] < 0:
    print("[INFO] training...")
    model.fit(trainData, trainLabels, batch_size=128, nb_epoch=20, verbose=1)

    # show the accuracy on the testing set
    print("[INFO] evaluating...")
    (loss, accuracy) = model.evaluate(testData, testLabels,
                                      batch_size=128, verbose=1)
    print("[INFO] accuracy: {:.2f}%".format(accuracy * 100))
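
For reference, `np_utils.to_categorical` maps an integer class index to a one-hot row vector; a quick toy illustration (values chosen purely for demonstration):

from keras.utils import np_utils
import numpy as np

labels = np.array([0, 2, 1])
print(np_utils.to_categorical(labels, 3))
# [[ 1.  0.  0.]
#  [ 0.  0.  1.]
#  [ 0.  1.  0.]]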
Example #4
#	data / 255.0, dataset.target.astype("int"), test_size=0.33)

# transform the training and testing labels into vectors in the
# range [0, classes] -- this generates a vector for each label,
# where the index of the label is set to `1` and all other entries
# to `0`; in the case of MNIST, there are 10 class labels
#trainLabels = np_utils.to_categorical(trainLabels, 10)
#testLabels = np_utils.to_categorical(testLabels, 10)

# initialize the optimizer and model
#print image_list.shape
print("[INFO] compiling model...")
opt = SGD(lr=0.005)
model = LeNet.build(
    width=200,
    height=150,
    depth=1,
    classes=6,
    weightsPath=args["weights"] if args["load_model"] > 0 else None)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# only train and evaluate the model if we *are not* loading a
# pre-existing model
if args["load_model"] < 0:
    print("[INFO] training...")
    model.fit(image_list, image_labels, batch_size=20, nb_epoch=10, verbose=1)

    # show the accuracy on the testing set
    print("[INFO] evaluating...")
    for i in range(120):
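
The snippet cuts off inside its evaluation loop. A minimal body, borrowing the per-sample prediction pattern that example #6 uses (the bound of 120 and the variable names come from the fragment; everything else is an assumption):

    for i in range(120):
        # classify one image at a time and show the predicted class
        probs = model.predict(image_list[np.newaxis, i])
        prediction = probs.argmax(axis=1)
        print("[INFO] predicted: {}, actual: {}".format(prediction[0], image_labels[i]))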
Example #5
#	data / 255.0, labels, test_size=0.33)

# transform the labels into one-hot vectors -- this generates a
# vector for each label, where the index of the label is set to `1`
# and all other entries to `0`; in this problem there are 3 class
# labels
#trainLabels = np_utils.to_categorical(trainLabels, 3)
#testLabels = np_utils.to_categorical(testLabels, 3)
labels = np_utils.to_categorical(labels, 3)
# initialize the optimizer and model

print("[INFO] compiling model...")
opt = SGD(lr=0.01)
model = LeNet.build(
    width=224,
    height=224,
    depth=3,
    classes=3,
    weightsPath=args["weights"] if args["load_model"] > 0 else None)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# only train and evaluate the model if we *are not* loading a
# pre-existing model
if args["load_model"] < 0:
    print("[INFO] training...")
    model.fit(data / 255.0, labels, batch_size=32, epochs=10, verbose=1)

    # show the accuracy on the testing set
    #print("[INFO] evaluating...")
    #(loss, accuracy) = model.evaluate(testData, testLabels,
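
Each of these scripts defines -s/--save-model and -w/--weights flags, but the save branch is cut off here. The ending this tutorial pattern normally pairs with the load path above looks like the following (hedged, since the original block is not shown in this fragment):

# check to see if the model should be saved to file
if args["save_model"] > 0:
    print("[INFO] dumping weights to file...")
    model.save_weights(args["weights"], overwrite=True)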
Example #6
def main():

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    #Param Configuration!

    path = '/home/borg/sudoRepo/Thesis/DataSet/'
    pic_shape = (300, 300, 3)
    n_epochs = 50
    opt = SGD(lr=0.001)
    adam = keras.optimizers.Adam(lr=0.0005,
                                 beta_1=0.9,
                                 beta_2=0.99,
                                 epsilon=1e-08,
                                 decay=0.0)
    n_classes = 10
    train = True  # If false, load parameters and run validation!

    precise_evaluation = False
    ##################################

    trainData = np.load(path + 'x.npy')
    trainData = np.reshape(
        trainData,
        (trainData.shape[0], pic_shape[0], pic_shape[1], pic_shape[2]))
    trainLabels = np.load(path + 'y.npy')

    trainData, testData, trainLabels, testLabels = train_test_split(
        trainData, trainLabels, test_size=0.2, random_state=0)

    #testData = np.load(path+'test_x.npy')
    #testData = np.reshape(testData, (testData.shape[0], pic_shape[0],pic_shape[1],pic_shape[2]))
    #testLabels = np.load(path+'test_y.npy')
    print('Data Loaded!')

    print('Deleting old logs in 1 sec...')
    time.sleep(1)
    for item in os.listdir('/home/borg/SabBido/logs'):
        os.remove(os.path.join('/home/borg/SabBido/logs', item))

    tbCallBack = keras.callbacks.TensorBoard(log_dir='/home/borg/SabBido/logs',
                                             histogram_freq=0,
                                             write_graph=True,
                                             write_images=True)

    print("[INFO] compiling model...")

    model_MatFra = LeNet.build(pic_shape[0],
                               pic_shape[1],
                               pic_shape[2],
                               classes=n_classes,
                               mode=1)
    model_MatFra.compile(loss="categorical_crossentropy",
                         optimizer=opt,
                         metrics=["accuracy"])

    #model_MatFra.save_weights("../NN_param_sim/startingWeights.h5")
    #model_MatFra.load_weights("../NN_param_sim/startingWeights_working.h5")
    model_MatFra.summary()
    time.sleep(5)

    early_stopping = EarlyStopping(monitor='val_loss', patience=5)

    #model_MatFra.load_weights("../NN_param_sim/model.h5")
    if train:
        history_MatFra = model_MatFra.fit(
            trainData,
            trainLabels,
            batch_size=50,
            epochs=n_epochs,
            verbose=1,
            validation_data=(testData, testLabels),
            callbacks=[tbCallBack, early_stopping])

        # serialize model to JSON
        #model_json = model_MatFra.to_json()
        #with open("../NN_param/model.json", "w") as json_file:
        #    json_file.write(model_json)

        #serialize weights to HDF5
        model_MatFra.save_weights("../NN_param_sim/model.h5")
        print("Saved model to disk")

    else:

        model_MatFra.load_weights("../NN_param_sim/model.h5")

    print("[INFO] evaluating...")
    (loss, accuracy) = model_MatFra.evaluate(testData,
                                             testLabels,
                                             batch_size=50,
                                             verbose=1)
    print("[INFO] accuracy: {:.2f}%".format(accuracy * 100))

    if precise_evaluation:
        print('specific: ')
        for i in range(len(testData)):
            # classify one test sample at a time
            probs = model_MatFra.predict(testData[np.newaxis, i])
            prediction = probs.argmax(axis=1)
            print("#########################")
            print(testLabels[i])
            probs = np.round(probs, decimals=2)
            print(probs)
            print("-------------------------")
            time.sleep(0.5)
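
The `tf.ConfigProto`/`tf.Session` setup at the top of example #6 is TensorFlow 1.x API. Under TensorFlow 2.x the same allow-growth behaviour is configured per device instead; a sketch:

import tensorflow as tf

# TF 2.x replacement for the ConfigProto/Session block above:
# grow GPU memory on demand instead of reserving it all up front
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)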
Example #7
    label = imagePath.split(os.path.sep)[-2]
    labels.append(label)
# scale the raw pixel intensities to the range [0, 1]
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.25, random_state=42)
# convert the labels from integers to vectors
lb = LabelBinarizer().fit(trainY)
trainY = lb.transform(trainY)
testY = lb.transform(testY)
# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=28, height=28, depth=1, classes=10)
opt = SGD(lr=0.01)
model.compile(loss="categorical_crossentropy", optimizer=opt,
    metrics=["accuracy"])
# train the network
print("[INFO] training network...")
H = model.fit(trainX, trainY, validation_data=(testX, testY),
    batch_size=32, epochs=15, verbose=1)
# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=32)

print(classification_report(testY.argmax(axis=1),
    predictions.argmax(axis=1), target_names=lb.classes_))
# save the model to disk
print("[INFO] serializing network...")