Example #1
import os

import cv2
import numpy as np

# GetData comes from the surrounding project.


class MatchTests:
    def __init__(self, samStart=11, samEnd=36):
        self.data = GetData(sampleStart=samStart, sampleEnd=samEnd)
        self.avgs = self.data.GetAveraged()
        self.samStart = samStart

    def ClassifyTests(
            self,
            testDir="/home/brian/PycharmProjects/firstAttempt/HandLetters/"):
        matches = []
        # Load every test image in grayscale as a (normalized image, filename) tuple.
        testImages = [
            (self.data.NormalizeImage(cv2.imread(os.path.join(testDir, j), 0)), j)
            for j in os.listdir(testDir)
        ]
        for img, name in testImages:
            bestMatch = 0
            # Sum-of-squared-differences error against the first averaged letter.
            errorArray = self.avgs[0].astype(float) - img.astype(float)
            errorArray = errorArray * errorArray
            minError = errorArray.sum()
            # Keep the averaged letter with the smallest squared error.
            for j in range(len(self.avgs)):
                errorArray = self.avgs[j].astype(float) - img.astype(float)
                errorArray = errorArray * errorArray
                error = errorArray.sum()
                if error < minError:
                    minError = error
                    bestMatch = j
            matches.append(name + " was identified to be: " +
                           self.data.GetLetters()[bestMatch + self.samStart])
        return matches
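
A minimal usage sketch (assuming the project's GetData and its sample images are in place): instantiate the classifier and print each match.

tester = MatchTests(samStart=11, samEnd=36)
for result in tester.ClassifyTests():
    print(result)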
Example #2
def __init__(self, sampStart=11, sampEnd=36):
    data = GetData(sampleStart=sampStart, sampleEnd=sampEnd)
    self.sampStart = sampStart
    self.sampEnd = sampEnd
    self.imgs = data.GetListImgs()
    self.letters = data.GetLetters()
    avgs = data.GetAveraged()
    # Keep only the averages for the requested sample range.
    self.avgs = avgs[sampStart - 1:sampEnd]
    # Flattened averages, usable as initial cluster guesses.
    self.kguesses = np.array([list(j.flatten()) for j in avgs])
    self.k = sampEnd - sampStart + 1
    self.data = data
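
The kguesses and k attributes suggest the flattened averages seed a k-means step. A minimal sketch under that assumption, using SciPy's kmeans2 with explicit initial centroids (the class name Matcher is hypothetical, since the snippet starts at __init__):

from scipy.cluster.vq import kmeans2
import numpy as np

matcher = Matcher(sampStart=11, sampEnd=36)  # hypothetical class name
# Flatten each image into a row vector so it matches the centroid layout.
samples = np.array([img.flatten() for img in matcher.imgs], dtype=float)
# minit='matrix' tells kmeans2 to treat the second argument as initial centroids.
centroids, labels = kmeans2(samples, matcher.kguesses.astype(float), minit='matrix')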
Example #3
import numpy as np

# np_utils, GetData, read_model, and ModelBuilder come from Keras and the
# surrounding project.


# Signature reconstructed: the original snippet starts mid-function.
def weights_equal(layers1, layers2):
    """Return True if two lists of Keras layers carry identical weights."""
    if len(layers1) != len(layers2):
        return False
    for i in range(len(layers1)):
        w1 = layers1[i].get_weights()
        w2 = layers2[i].get_weights()
        # get_weights() returns a list of arrays (e.g. kernel and bias), so
        # compare element-wise rather than on the ragged lists themselves.
        if len(w1) != len(w2):
            return False
        if any(not np.array_equal(a, b) for a, b in zip(w1, w2)):
            return False
    return True
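
A short usage sketch for the helper above (the two model variables are hypothetical):

print("weights identical:", weights_equal(model_a.layers, model_b.layers))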


# define the training dataset folder path
TRAINING_DATASET_FOLDER_NAME = '3_preprocessed_1_dataset train'

X_train, Y_train, _ = GetData(TRAINING_DATASET_FOLDER_NAME, limit_value=1)
Y_train = np_utils.to_categorical(Y_train, 2)
X_train = X_train.astype('float32')
X_train /= np.max(X_train)

width = 80
height = 80
depth = 2
num_classes = 2

# load the model architecture from file
a = read_model("models/model01.txt")
modelObject = ModelBuilder.ModelBuilder(a, (height, width, depth))
model = modelObject.model

# Compile call completed to match the identical calls in the later examples.
model.compile(
    loss='categorical_crossentropy',  # using the cross-entropy loss function
    optimizer='adam',  # using the Adam optimiser
    metrics=['accuracy'])
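
The snippet cuts off at the compile call; training would presumably follow with fit. A minimal sketch (epoch count and batch size are placeholders):

# Hypothetical training step for the architecture loaded above.
model.fit(X_train, Y_train, epochs=10, batch_size=32)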
Example #4
    # Tail of a plot_confusion_matrix helper; the snippet starts mid-function,
    # so the enclosing cell-labelling loop is reconstructed from the standard
    # matplotlib confusion-matrix template (assumes itertools and
    # matplotlib.pyplot as plt are imported above).
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


a = read_model("models/model08.txt")
modelObject = ModelBuilder(a, (80, 80, 2))
model = modelObject.model
model.load_weights(
    'trained_model/2018-07-04 22:29:20/model08.txt_2018-07-05 12:24:21.h5')
#plot_model(model, to_file='model_graph.png', show_shapes=True, show_layer_names=True)

(X_test, y_test, _) = GetData('lfw-whofitinram_p80x80')
num_classes = 2  # binary task, matching the other examples in this file
Y_test = np_utils.to_categorical(y_test, num_classes)
X_test = X_test.astype('float32')
X_test /= np.max(X_test)  # Normalise data to [0, 1] range
model.compile(
    loss='categorical_crossentropy',  # using the cross-entropy loss function
    optimizer='adam',  # using the Adam optimiser
    metrics=['accuracy'])

model.summary()
# evaluate the model
print(model.evaluate(X_test, Y_test))
predictions = model.predict(X_test)

#========================PRINT CONFUSION MATRIX==============================
#predict labels
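
The example is truncated here. A minimal sketch of the step the comments announce, assuming scikit-learn is available:

from sklearn.metrics import confusion_matrix

# Collapse the softmax outputs back to class labels.
y_pred = np.argmax(predictions, axis=1)
print(confusion_matrix(y_test, y_pred))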
Example #5
bp = 'trained_model/'

NUM_CLASSES = 2
TEST_DATASET_FOLDER_NAME = '3_preprocessed_1_dataset train'
MAX_IMAGES_TO_PLOT = 36
NUM_PRINTED_PAGES = 3
MODEL_TO_LOAD = '2018-05-06 15:25:00.h5'

a = read_model("models/model1.txt")
modelObject = ModelBuilder.ModelBuilder(a, (80, 80, 2))
model = modelObject.model
model.load_weights(bp + MODEL_TO_LOAD)

#model = load_model(bp + MODEL_TO_LOAD)

(X_test, y_test, _) = GetData(TEST_DATASET_FOLDER_NAME, 5)
Y_test = np_utils.to_categorical(y_test, NUM_CLASSES)
X_test = X_test.astype('float32')
X_test /= np.max(X_test)  # Normalise data to [0, 1] range
model.compile(
    loss='categorical_crossentropy',  # using the cross-entropy loss function
    optimizer='adam',  # using the Adam optimiser
    metrics=['accuracy'])

print(model.evaluate(X_test, Y_test))

# Count the positive labels without shadowing the built-in sum().
num_positives = int(np.sum(y_test))
print(num_positives)


def CrossValidate(k,
                  models,
                  models_array_name,
                  dataset_folder_name,
                  batch_size,
                  num_epochs=200,
                  chat_id="undefined",
                  folders_at_the_same_time=20,
                  max_num_of_validation_folders=12,
                  validate_every=5,
                  validation_treshold=0):

    avg_val_accuracy_models = []
    total_num_folders = len(os.listdir(dataset_folder_name))
    # Validate on at most max_num_of_validation_folders folders per round.
    if total_num_folders < max_num_of_validation_folders:
        folders_each_validation = total_num_folders // k
    else:
        folders_each_validation = max_num_of_validation_folders
    timestamp = current_datetime()
    path = 'Crossvalidation Results/crossvaliationresults_' + timestamp + '.txt'

    # folder where the trained models will be saved
    os.mkdir("trained_model/" + timestamp)

    with open(path, 'w') as the_file:
        the_file.write(current_datetime() + '\n')

    for i in range(len(models)):
        print("\n validating model: " + models_array_name[i])
        sum_model_validations_acc = 0
        to_avoid_validation = []

        with open(path, 'a') as the_file:
            the_file.write('\n \n \n \n model: ' + str(i))

        with open(path, 'a') as the_file:
            models[i].summary(
                print_fn=lambda x: the_file.write('\n' + x + '\n'))

        # send a Telegram message when training of the next model starts
        if chat_id != 'undefined':
            telegram_send_msg("START TRAINING {}".format(models_array_name[i]))

        for j in range(k):
            print("\n validation round " + str(j))
            (X_validation, Y_validation, validation_folders_list) = GetData(
                dataset_folder_name, limit_value=folders_each_validation)
            X_validation = X_validation.astype('float32')
            X_validation /= np.max(X_validation)
            Y_validation = np_utils.to_categorical(Y_validation, 2)
            to_avoid_validation = to_avoid_validation + validation_folders_list

            validation_history = Train.SingletonTrain().Train(
                models[i],
                training_dataset_folder_name=dataset_folder_name,
                epochs=num_epochs,
                batch_size=batch_size,
                training_folders_count=folders_at_the_same_time,
                validation_x=X_validation,
                validation_y=Y_validation,
                to_avoid=validation_folders_list,
                validate_every=validate_every,
                subfolder_name=timestamp,
                enable_telegram_bot=(chat_id != "undefined"),
                save_model=models_array_name[i],
                validation_treshold=validation_treshold)

            if len(validation_history) > 0:
                sum_model_validations_acc += (validation_history[-1])[1]

        avg_val_accuracy_models += [sum_model_validations_acc / k]

        with open(path, 'a') as the_file:
            the_file.write('\n validation results ' +
                           str(sum_model_validations_acc / k))

    # The snippet ends here; presumably the per-model averages are returned.
    return avg_val_accuracy_models
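
A minimal usage sketch for CrossValidate (the model builders and names are hypothetical):

models = [build_model_a(), build_model_b()]  # hypothetical Keras model builders
CrossValidate(k=5,
              models=models,
              models_array_name=["model_a", "model_b"],
              dataset_folder_name="3_preprocessed_1_dataset train",
              batch_size=32)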