def __init__(self):
    # initialize the video stream, face detector, and landmark predictor
    self.video_stream = cv2.VideoCapture(VIDEO_PREDICTOR.camera_source)
    self.face_detector = cv2.CascadeClassifier(
        VIDEO_PREDICTOR.face_detection_classifier)
    self.shape_predictor = dlib.shape_predictor(DATASET.shape_predictor_path)

    model = None
    with tf.Graph().as_default():
        network = build_model(
            use_landmarks=True,
            use_hog_and_landmarks=True,
            use_hog_sliding_window_and_landmarks=True)
        model = DNN(network)
        if os.path.isfile(TRAINING.save_model_path_landmarks_hog_sw):
            model.load(TRAINING.save_model_path_landmarks_hog_sw)
        else:
            print("Error: file '{}' not found".format(
                TRAINING.save_model_path_landmarks_hog_sw))

    self.model = model
    self.last_predicted_time = 0
    self.last_predicted_confidence = 0
    self.last_predicted_emotion = ""
    def load_model(self):
        """
			This method loads a created model from the drive.
		"""
        model = None
        with tf.Graph().as_default():
            self.create_network()
            model = DNN(self.network, tensorboard_verbose=0)
            model.load("./classifier/models/classifier.tflearn")
            self.model = model
def load_model():
    model = None
    with tf.Graph().as_default():
        print "loading pretrained model..."
        network = build_model()
        model = DNN(network)
        if os.path.isfile(TRAINING.save_model_path):
            model.load(TRAINING.save_model_path)
        else:
            print "Error: file '{}' not found".format(TRAINING.save_model_path)
    return model
def load_model():
    model = None
    with tf.Graph().as_default():
        print("loading pretrained model...")
        network = build_model(use_landmarks=True,
                              use_hog_and_landmarks=True,
                              use_hog_sliding_window_and_landmarks=True)
        model = DNN(network)
        if os.path.isfile(TRAINING.save_model_path_landmarks_hog_sw):
            model.load(TRAINING.save_model_path_landmarks_hog_sw)
        else:
            print("Error: file '{}' not found".format(
                TRAINING.save_model_path_landmarks_hog_sw))
    return model
def load_model():
    model = None
    with tf.Graph().as_default():
        print("loading pretrained model...")
        data, validation, test = load_data(validation=True, test=True)
        network = build_model()
        model = DNN(network)
        if os.path.isfile(TRAINING.save_model_path):
            model.load(TRAINING.save_model_path)
        else:
            print("Error: file '{}' not found".format(TRAINING.save_model_path))

        print("--")
        print("Validation samples: {}".format(len(validation['Y'])))
        print("Test samples: {}".format(len(test['Y'])))
        print("--")
        print("evaluating...")
        start_time = time.time()
        validation_accuracy = evaluate(model, validation['X'], validation['X2'], validation['Y'])
        print("  - validation accuracy = {0:.1f}".format(validation_accuracy * 100))
        test_accuracy = evaluate(model, test['X'], test['X2'], test['Y'])
        print("  - test accuracy = {0:.1f}".format(test_accuracy * 100))
        print("  - evalution time = {0:.1f} sec".format(time.time() - start_time))
    return model
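A minimal usage sketch for the loaders above, assuming the project's build_model, DNN, and TRAINING objects are importable; the 48x48 grayscale input shape is an assumption based on the input layers defined later on this page:

# Hedged usage sketch; the input shape is an assumption, not confirmed here.
import numpy as np

model = load_model()
if model is not None:
    face = np.zeros((1, 48, 48, 1), dtype=np.float32)  # placeholder batch of one
    probabilities = model.predict(face)
    print("predicted class: {}".format(int(np.argmax(probabilities[0]))))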
def train(optimizer=HYPERPARAMS.optimizer,
          optimizer_param=HYPERPARAMS.optimizer_param,
          learning_rate=HYPERPARAMS.learning_rate,
          keep_prob=HYPERPARAMS.keep_prob,
          learning_rate_decay=HYPERPARAMS.learning_rate_decay,
          decay_step=HYPERPARAMS.decay_step,
          train_model=True):

    print "loading dataset " + DATASET.name + "..."
    if train_model:
        data, validation = load_data(validation=True)
    else:
        data, validation, test = load_data(validation=True, test=True)

    with tf.Graph().as_default():
        print "building model..."
        network = build_model(optimizer, optimizer_param, learning_rate,
                              keep_prob, learning_rate_decay, decay_step)
        model = DNN(network,
                    tensorboard_dir=TRAINING.logs_dir,
                    tensorboard_verbose=0,
                    checkpoint_path=TRAINING.checkpoint_dir,
                    max_checkpoints=TRAINING.max_checkpoints)

        #tflearn.config.init_graph(seed=None, log_device=False, num_cores=6)

        if train_model:
            # Training phase
            print "start training..."
            print "  - emotions = {}".format(NETWORK.output_size)
            print "  - optimizer = '{}'".format(optimizer)
            print "  - learning_rate = {}".format(learning_rate)
            print "  - learning_rate_decay = {}".format(learning_rate_decay)
            print "  - otimizer_param ({}) = {}".format(
                'beta1' if optimizer == 'adam' else 'momentum',
                optimizer_param)
            print "  - keep_prob = {}".format(keep_prob)
            print "  - epochs = {}".format(TRAINING.epochs)
            print "  - use landmarks = {}".format(NETWORK.use_landmarks)
            print "  - use hog + landmarks = {}".format(
                NETWORK.use_hog_and_landmarks)
            print "  - use hog sliding window + landmarks = {}".format(
                NETWORK.use_hog_sliding_window_and_landmarks)
            print "  - use batchnorm after conv = {}".format(
                NETWORK.use_batchnorm_after_conv_layers)
            print "  - use batchnorm after fc = {}".format(
                NETWORK.use_batchnorm_after_fully_connected_layers)

            start_time = time.time()
            if NETWORK.use_landmarks:
                model.fit([data['X'], data['X2']],
                          data['Y'],
                          validation_set=([validation['X'],
                                           validation['X2']], validation['Y']),
                          snapshot_step=TRAINING.snapshot_step,
                          show_metric=TRAINING.vizualize,
                          batch_size=TRAINING.batch_size,
                          n_epoch=TRAINING.epochs)
            else:
                model.fit(data['X'],
                          data['Y'],
                          validation_set=(validation['X'], validation['Y']),
                          snapshot_step=TRAINING.snapshot_step,
                          show_metric=TRAINING.vizualize,
                          batch_size=TRAINING.batch_size,
                          n_epoch=TRAINING.epochs)
                validation['X2'] = None
            training_time = time.time() - start_time
            print "training time = {0:.1f} sec".format(training_time)

            if TRAINING.save_model:
                print("saving model...")
                model.save(TRAINING.save_model_path)
                if not os.path.isfile(TRAINING.save_model_path) and \
                        os.path.isfile(TRAINING.save_model_path + ".meta"):
                    os.rename(TRAINING.save_model_path + ".meta",
                              TRAINING.save_model_path)

            print "evaluating..."
            validation_accuracy = evaluate(model, validation['X'],
                                           validation['X2'], validation['Y'])
            print "  - validation accuracy = {0:.1f}".format(
                validation_accuracy * 100)
            return validation_accuracy
        else:
            # Testing phase: load the saved model and evaluate on the test dataset
            print("start evaluation...")
            print("loading pretrained model...")
            if os.path.isfile(TRAINING.save_model_path):
                model.load(TRAINING.save_model_path)
            else:
                print("Error: file '{}' not found".format(
                    TRAINING.save_model_path))
                exit()

            if not NETWORK.use_landmarks:
                validation['X2'] = None
                test['X2'] = None

            print "--"
            print "Validation samples: {}".format(len(validation['Y']))
            print "Test samples: {}".format(len(test['Y']))
            print "--"
            print "evaluating..."
            start_time = time.time()
            validation_accuracy = evaluate(model, validation['X'],
                                           validation['X2'], validation['Y'])
            print "  - validation accuracy = {0:.1f}".format(
                validation_accuracy * 100)
            test_accuracy = evaluate(model, test['X'], test['X2'], test['Y'])
            print "  - test accuracy = {0:.1f}".format(test_accuracy * 100)
            print "  - evalution time = {0:.1f} sec".format(time.time() -
                                                            start_time)
            return test_accuracy
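A hedged sketch of how train() might be driven from a script; the call pattern follows the two branches above, but the entry-point wrapper itself is an illustration:

# Hedged usage sketch for train(); the __main__ wrapper is illustrative.
if __name__ == "__main__":
    # Training phase: fit on the training split, report validation accuracy.
    val_acc = train()
    print("validation accuracy = {0:.1f}".format(val_acc * 100))

    # Testing phase: load the saved model and report test accuracy.
    test_acc = train(train_model=False)
    print("test accuracy = {0:.1f}".format(test_acc * 100))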
Example #7
class FireDetector:
    def __init__(self,
                 height=INPUT_HEIGHT,
                 width=INPUT_WIDTH,
                 n_channels=NUMBER_CHANNELS):
        self.height = height
        self.width = width
        self.n_channels = n_channels

        self.logger = create_logger('Fire Detector')

        self._build_network()

    def _build_network(self):
        self.logger.info('Started CNN structure construction')
        network = input_data(shape=[None, self.height, self.width, 3],
                             dtype=float32)

        network = conv_2d(network, 64, 5, strides=4, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = local_response_normalization(network)

        network = conv_2d(network, 128, 4, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = local_response_normalization(network)

        network = conv_2d(network, 256, 1, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = local_response_normalization(network)

        network = fully_connected(network, 4096, activation='tanh')
        network = dropout(network, 0.5)

        network = fully_connected(network, 4096, activation='tanh')
        network = dropout(network, 0.5)

        network = fully_connected(network, 2, activation='softmax')

        network = regression(network,
                             optimizer='momentum',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
        self.cnn_ = DNN(network,
                        checkpoint_path='firenet',
                        max_checkpoints=1,
                        tensorboard_verbose=2)
        self.logger.info('Finished CNN structure construction')

    def load_weights(self, weights_path):
        self.logger.info('Loading weights...')
        self.cnn_.load(weights_path, weights_only=True)
        self.logger.info('Weights loaded successfully')

    def predict(self, images):
        images = self._ensure_expected_shape(images)
        predictions = self.cnn_.predict(images)
        predictions = [pred[0] for pred in predictions]
        return predictions

    def _ensure_expected_shape(self, images):
        images_reshaped = []
        expected_shape = (self.height, self.width, self.n_channels)

        for img in images:
            if img.shape != expected_shape:
                img = reshape_image(img, self.height, self.width)
            images_reshaped.append(img)

        return images_reshaped
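A minimal usage sketch for FireDetector; the weights path and image file below are placeholder assumptions:

# Hedged usage sketch; 'firenet.tflearn' and 'frame.jpg' are placeholders.
import cv2

detector = FireDetector()
detector.load_weights('firenet.tflearn')
frame = cv2.imread('frame.jpg')
scores = detector.predict([frame])  # one softmax component per image
print('fire score: {0:.3f}'.format(scores[0]))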
Example #8
network = input_data(shape=[None, 48, 48, 1], name='input')
network = conv_2d(network, nb_filter=32, filter_size=[7, 7], activation='relu')
network = max_pool_2d(network, kernel_size=2)
network = conv_2d(network, nb_filter=64, filter_size=[7, 7], activation='relu')
network = max_pool_2d(network, kernel_size=2)
network = fully_connected(network, n_units=512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, n_units=7, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='targets')
# wrap the network in a model and load the trained weights from file
model = DNN(network)
model.load("model/model.tfl")


def predict(input_image):
    prediction = model.predict(input_image)
    print(prediction)

    predicted_class = np.argmax(prediction)
    print(predicted_class)

    meaning_list = [
        'Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'
    ]
    print(meaning_list[predicted_class])

    plt.imshow(np.squeeze(input_image), interpolation='bicubic')
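The input layer above is [None, 48, 48, 1], so callers have to supply a batched grayscale crop; a hedged preparation sketch ('face.png' is a placeholder file name):

# Hedged input-preparation sketch; 'face.png' is a placeholder.
import cv2

face = cv2.imread('face.png', cv2.IMREAD_GRAYSCALE)
face = cv2.resize(face, (48, 48))
predict(face.reshape(1, 48, 48, 1))  # matches the [None, 48, 48, 1] input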
    def LoadTable(self):
        # SVM
        def evaluate(model, X, Y):
            predicted_Y = model.predict(X)
            accuracy = accuracy_score(Y, predicted_Y)
            return accuracy

        def evaluate_cnn(model, X, X2, Y, use_landmarks=False):
            if use_landmarks:
                accuracy = model.evaluate([X, X2], Y)
            else:
                accuracy = model.evaluate(X, Y)
            return accuracy[0]

        lstfeature = ["landmarks", "landmarks_and_hog", "landmarks_and_hog_sw"]
        for feature in lstfeature:
            model = None
            with tf.Graph().as_default():
                print("loading pretrained model...")
                if (feature == "landmarks_and_hog"):
                    data, validation, test = load_data_svm(validation=True,
                                                           test=True,
                                                           feature=feature)
                    if os.path.isfile(
                            TRAININGSVM.save_model_path_landmarks_hog):
                        with open(TRAININGSVM.save_model_path_landmarks_hog,
                                  'rb') as f:
                            model = cPickle.load(f)
                    else:
                        print("Error: file '{}' not found".format(
                            TRAININGSVM.save_model_path_landmarks_hog))
                    print("--")
                    print("Validation samples landmarks_and_hog: {}".format(
                        len(validation['Y'])))
                    print("Test samples landmarks_and_hog: {}".format(
                        len(test['Y'])))
                    print("--")
                    print("evaluating...")
                    start_time = time.time()
                    validation_accuracy = evaluate(model, validation['X'],
                                                   validation['Y'])
                    print("  - validation accuracy = {0:.1f}".format(
                        validation_accuracy * 100))
                    test_accuracy = evaluate(model, test['X'], test['Y'])
                    print("  - test accuracy = {0:.1f}".format(test_accuracy *
                                                               100))
                    print("  - evaluation time = {0:.1f} sec".format(
                        time.time() - start_time))
                    PARAMETERINPUT.Validation_faceLandmarksHoG_svm = "{0:.1f}".format(
                        validation_accuracy * 100)
                    PARAMETERINPUT.Test_faceLandmarksHoG_svm = "{0:.1f}".format(
                        test_accuracy * 100)
                    PARAMETERINPUT.Time_faceLandmarksHoG_svm = "{0:.1f}".format(
                        time.time() - start_time)
                elif (feature == "landmarks_and_hog_sw"):
                    data, validation, test = load_data_svm(validation=True,
                                                           test=True,
                                                           feature=feature)
                    if os.path.isfile(
                            TRAININGSVM.save_model_path_landmarks_hog_sw):
                        with open(TRAININGSVM.save_model_path_landmarks_hog_sw,
                                  'rb') as f:
                            model = cPickle.load(f)
                    else:
                        print("Error: file '{}' not found".format(
                            TRAININGSVM.save_model_path_landmarks_hog_sw))
                    print("--")
                    print("Validation samples landmarks_and_hog_sw: {}".format(
                        len(validation['Y'])))
                    print("Test samples landmarks_and_hog_sw: {}".format(
                        len(test['Y'])))
                    print("--")
                    print("evaluating...")
                    start_time = time.time()
                    validation_accuracy = evaluate(model, validation['X'],
                                                   validation['Y'])
                    print("  - validation accuracy = {0:.1f}".format(
                        validation_accuracy * 100))
                    test_accuracy = evaluate(model, test['X'], test['Y'])
                    print("  - test accuracy = {0:.1f}".format(test_accuracy *
                                                               100))
                    print("  - evaluation time = {0:.1f} sec".format(
                        time.time() - start_time))
                    PARAMETERINPUT.Validation_faceLandmarksHoGSlidingWindow_svm = "{0:.1f}".format(
                        validation_accuracy * 100)
                    PARAMETERINPUT.Test_faceLandmarksHoGSlidingWindow_svm = "{0:.1f}".format(
                        test_accuracy * 100)
                    PARAMETERINPUT.Time_faceLandmarksHoGSlidingWindow_svm = "{0:.1f}".format(
                        time.time() - start_time)
                else:
                    data, validation, test = load_data_svm(validation=True,
                                                           test=True,
                                                           feature=feature)
                    if os.path.isfile(TRAININGSVM.save_model_path_landmarks):
                        with open(TRAININGSVM.save_model_path_landmarks,
                                  'rb') as f:
                            model = cPickle.load(f)
                    else:
                        print("Error: file '{}' not found".format(
                            TRAININGSVM.save_model_path_landmarks))
                    print("--")
                    print("Validation samples landmarks: {}".format(
                        len(validation['Y'])))
                    print("Test samples landmarks: {}".format(len(test['Y'])))
                    print("--")
                    print("evaluating...")
                    start_time = time.time()
                    validation_accuracy = evaluate(model, validation['X'],
                                                   validation['Y'])
                    print("  - validation accuracy = {0:.1f}".format(
                        validation_accuracy * 100))
                    test_accuracy = evaluate(model, test['X'], test['Y'])
                    print("  - test accuracy = {0:.1f}".format(test_accuracy *
                                                               100))
                    print("  - evaluation time = {0:.1f} sec".format(
                        time.time() - start_time))
                    PARAMETERINPUT.Validation_faceLandmarks_svm = "{0:.1f}".format(
                        validation_accuracy * 100)
                    PARAMETERINPUT.Test_faceLandmarks_svm = "{0:.1f}".format(
                        test_accuracy * 100)
                    PARAMETERINPUT.Time_faceLandmarks_svm = "{0:.1f}".format(
                        time.time() - start_time)

        # CNN
        lstfeature = [
            "landmarks", "landmarks_and_hog", "raw", "landmarks_and_hog_sw"
        ]
        for feature in lstfeature:
            model = None
            with tf.Graph().as_default():
                print("loading pretrained model...")
                if (feature == "landmarks_and_hog"):
                    data, validation, test = load_data_cnn(
                        validation=True,
                        test=True,
                        use_landmarks=False,
                        use_hog_and_landmarks=True,
                        use_hog_sliding_window_and_landmarks=False)
                    network = build_model(
                        use_landmarks=True,
                        use_hog_and_landmarks=True,
                        use_hog_sliding_window_and_landmarks=False)
                    model = DNN(network)
                    if os.path.isfile(TRAINING.save_model_path_landmarks_hog):
                        model.load(TRAINING.save_model_path_landmarks_hog)
                    else:
                        print("Error: file '{}' not found".format(
                            TRAINING.save_model_path_landmarks_hog))

                    print("--")
                    print("Validation samples: {}".format(len(
                        validation['Y'])))
                    print("Test samples: {}".format(len(test['Y'])))
                    print("--")
                    print("evaluating...")
                    start_time = time.time()
                    validation_accuracy = evaluate_cnn(model,
                                                       validation['X'],
                                                       validation['X2'],
                                                       validation['Y'],
                                                       use_landmarks=True)
                    print(
                        "  - validation accuracy landmarks_and_hog = {0:.1f}".
                        format(validation_accuracy * 100))
                    test_accuracy = evaluate_cnn(model,
                                                 test['X'],
                                                 test['X2'],
                                                 test['Y'],
                                                 use_landmarks=True)
                    print(
                        "  - test accuracy landmarks_and_hog = {0:.1f}".format(
                            test_accuracy * 100))
                    print("  - evalution time landmarks_and_hog = {0:.1f} sec".
                          format(time.time() - start_time))
                    PARAMETERINPUT.Validation_faceLandmarksHoG_cnn = "{0:.1f}".format(
                        validation_accuracy * 100)
                    PARAMETERINPUT.Test_faceLandmarksHoG_cnn = "{0:.1f}".format(
                        test_accuracy * 100)
                    PARAMETERINPUT.Time_faceLandmarksHoG_cnn = "{0:.1f}".format(
                        time.time() - start_time)
                elif (feature == "landmarks_and_hog_sw"):
                    data, validation, test = load_data_cnn(
                        validation=True,
                        test=True,
                        use_landmarks=True,
                        use_hog_and_landmarks=True,
                        use_hog_sliding_window_and_landmarks=True)
                    network = build_model(
                        use_landmarks=True,
                        use_hog_and_landmarks=True,
                        use_hog_sliding_window_and_landmarks=True)
                    model = DNN(network)
                    if os.path.isfile(
                            TRAINING.save_model_path_landmarks_hog_sw):
                        model.load(TRAINING.save_model_path_landmarks_hog_sw)
                    else:
                        print("Error: file '{}' not found".format(
                            TRAINING.save_model_path_landmarks_hog_sw))

                    print("--")
                    print("Validation samples: {}".format(len(
                        validation['Y'])))
                    print("Test samples: {}".format(len(test['Y'])))
                    print("--")
                    print("evaluating...")
                    start_time = time.time()
                    validation_accuracy = evaluate_cnn(model,
                                                       validation['X'],
                                                       validation['X2'],
                                                       validation['Y'],
                                                       use_landmarks=True)
                    print(
                        "  - validation accuracy landmarks_and_hog_sw = {0:.1f}"
                        .format(validation_accuracy * 100))
                    test_accuracy = evaluate_cnn(model,
                                                 test['X'],
                                                 test['X2'],
                                                 test['Y'],
                                                 use_landmarks=True)
                    print("  - test accuracy landmarks_and_hog_sw = {0:.1f}".
                          format(test_accuracy * 100))
                    print(
                        "  - evaluation time landmarks_and_hog_sw = {0:.1f} sec"
                        .format(time.time() - start_time))
                    PARAMETERINPUT.Validation_faceLandmarksHoGSlidingWindow_cnn = "{0:.1f}".format(
                        validation_accuracy * 100)
                    PARAMETERINPUT.Test_faceLandmarksHoGSlidingWindow_cnn = "{0:.1f}".format(
                        test_accuracy * 100)
                    PARAMETERINPUT.Time_faceLandmarksHoGSlidingWindow_cnn = "{0:.1f}".format(
                        time.time() - start_time)
                elif (feature == "landmarks"):
                    data, validation, test = load_data_cnn(
                        validation=True,
                        test=True,
                        use_landmarks=True,
                        use_hog_and_landmarks=False,
                        use_hog_sliding_window_and_landmarks=False)
                    network = build_model(
                        use_landmarks=True,
                        use_hog_and_landmarks=False,
                        use_hog_sliding_window_and_landmarks=False)
                    model = DNN(network)
                    if os.path.isfile(TRAINING.save_model_path_landmarks):
                        model.load(TRAINING.save_model_path_landmarks)
                    else:
                        print("Error: file '{}' not found".format(
                            TRAINING.save_model_path_landmarks))

                    print("--")
                    print("Validation samples: {}".format(len(
                        validation['Y'])))
                    print("Test samples: {}".format(len(test['Y'])))
                    print("--")
                    print("evaluating...")
                    start_time = time.time()
                    validation_accuracy = evaluate_cnn(model,
                                                       validation['X'],
                                                       validation['X2'],
                                                       validation['Y'],
                                                       use_landmarks=True)
                    print("  - validation accuracy landmarks = {0:.1f}".format(
                        validation_accuracy * 100))
                    test_accuracy = evaluate_cnn(model,
                                                 test['X'],
                                                 test['X2'],
                                                 test['Y'],
                                                 use_landmarks=True)
                    print("  - test accuracy landmarks = {0:.1f}".format(
                        test_accuracy * 100))
                    print("  - evalution time landmarks = {0:.1f} sec".format(
                        time.time() - start_time))
                    PARAMETERINPUT.Validation_faceLandmarks_cnn = "{0:.1f}".format(
                        validation_accuracy * 100)
                    PARAMETERINPUT.Test_faceLandmarks_cnn = "{0:.1f}".format(
                        test_accuracy * 100)
                    PARAMETERINPUT.Time_faceLandmarks_cnn = "{0:.1f}".format(
                        time.time() - start_time)

                else:
                    data, validation, test = load_data(validation=True,
                                                       test=True)
                    network = build_model()
                    model = DNN(network)
                    # Testing phase: load the saved model and evaluate on the test dataset
                    print("start evaluation...")
                    print("loading pretrained model...")
                    if os.path.isfile(TRAINING.save_model_path_raw):
                        model.load(TRAINING.save_model_path_raw)
                    else:
                        print("Error: file '{}' not found".format(
                            TRAINING.save_model_path_raw))
                        exit()

                    if not NETWORK.use_landmarks:
                        validation['X2'] = None
                        test['X2'] = None

                    print("--")
                    print("Validation samples: {}".format(len(
                        validation['Y'])))
                    print("Test samples: {}".format(len(test['Y'])))
                    print("--")
                    print("evaluating...")
                    start_time = time.time()
                    validation_accuracy = evaluate_cnn(model, validation['X'],
                                                       validation['X2'],
                                                       validation['Y'])
                    print("  - validation accuracy raw = {0:.1f}".format(
                        validation_accuracy * 100))
                    test_accuracy = evaluate_cnn(model, test['X'], test['X2'],
                                                 test['Y'])
                    print("  - test accuracy raw = {0:.1f}".format(
                        test_accuracy * 100))
                    print("  - evalution time raw = {0:.1f} sec".format(
                        time.time() - start_time))
                    PARAMETERINPUT.Validation_raw_cnn = "{0:.1f}".format(
                        validation_accuracy * 100)
                    PARAMETERINPUT.Test_raw_cnn = "{0:.1f}".format(
                        test_accuracy * 100)
                    PARAMETERINPUT.Time_raw_cnn = "{0:.1f}".format(
                        time.time() - start_time)

        self.treeview.insert(
            '',
            'end',
            text="CNN",
            values=(
                PARAMETERINPUT.Validation_raw_cnn,
                PARAMETERINPUT.Validation_faceLandmarks_cnn,
                PARAMETERINPUT.Validation_faceLandmarksHoG_cnn,
                PARAMETERINPUT.Validation_faceLandmarksHoGSlidingWindow_cnn))
        self.treeview.insert(
            '',
            'end',
            text="SVM",
            values=(
                '----', PARAMETERINPUT.Validation_faceLandmarks_svm,
                PARAMETERINPUT.Validation_faceLandmarksHoG_svm,
                PARAMETERINPUT.Validation_faceLandmarksHoGSlidingWindow_svm))
import numpy as np
from tflearn import DNN
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.estimator import regression

from PyGameSnakeNN import retro_snake

# Create the Network
network = input_data(shape=[None, 5, 1], name='input')
network = fully_connected(network, 25, activation='relu')
network = fully_connected(network, 1, activation='linear')
network = regression(network, optimizer='adam', learning_rate=1e-2, loss='mean_square', name='target')

# Create the model
model = DNN(network, tensorboard_dir='log')

# Load training data (x and y are assumed to hold observations collected
# from earlier games; they are not defined in this excerpt)
X = np.array([i[0] for i in x]).reshape(-1, 5, 1)
Y = np.array([i[0] for i in y]).reshape(-1, 1)

# train the NN and save it so the weights can be reloaded below
NN_filename = "model.h5"
model.fit(X, Y, n_epoch=3, shuffle=True, run_id=NN_filename)
model.save(NN_filename)

# load trained model
model.load(NN_filename, weights_only=True)

# Let the NN play a game
s2 = retro_snake(gui=True) # retro_snake is a class which contains the Snake game
input_vect, output_vect = s2.play(testNN=True, _model=model) # Play method runs the game with trained NN model


Example #12
class MyEncoder(json.JSONEncoder):
    # JSON encoder for numpy types (the class head and the numpy.integer
    # branch are assumed from the super() call and elif chain below)
    def default(self, obj):
        if isinstance(obj, numpy.integer):
            return int(obj)
        elif isinstance(obj, numpy.floating):
            return float(obj)
        elif isinstance(obj, numpy.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)


app = Flask(__name__, static_url_path="")
CORS(app)
model_path = "./Model/fjra_30.tfl"

network = Network.Define()
model = DNN(network)
model.load(model_path)
cascade = CascadeClassifier("./Utils/cascade.xml")


@app.errorhandler(400)
def not_found(error):
    return make_response(jsonify({'error': 'Bad request'}), 400)


@app.errorhandler(404)
def not_found(error):
    return make_response(jsonify({'error': 'Not found'}), 404)


@app.route('/emotions/api/v1.0/recognition', methods=['POST'])
def image():
    pass  # handler body not included in this excerpt
Example #13
class DNNBackend(BaseBackend):
    def create_model(self):
        """
        Creates DNN model that is based on built algorithm.

        Needed algorithm is builded with self.build_algorithm call.
        """
        self.log_named("model creation started")
        if self.algorithm is not None:
            self.model = DNN(self.algorithm,
                             checkpoint_path=self.checkpoints_dir_path,
                             max_checkpoints=1,
                             tensorboard_verbose=3,
                             tensorboard_dir=self.learn_logs_dir_path)
            self.log_named("model creation finished")
        else:
            self.log_named_warning(
                "model was not created, because algorithm is None!")

    def save_model(self):
        """
        Saves created DNN model to a file.

        Path to is got from self.model_file_path property.
        """
        if self.model is not None:
            self.model.save(self.model_file_path)
            self.log_named("model saved")
        else:
            self.log_named_warning(
                "model file was not saved, because model is None!")

    def load_model(self):
        """
        Loads saved DNN model from a file.

        Path to is got from self.model_file_path property.
        """
        if self.model is not None:
            if os.path.exists(self.model_file_dir_path) and len(
                    os.listdir(self.model_file_dir_path)):
                self.model.load(self.model_file_path)
                self.log_named("model loaded")
            else:
                self.log_named_warning("model file doesn't exist!")
        else:
            self.log_named_warning("model is None!")

    def restore_model_learning(self):
        """
        Restores model learning from the last checkpoint if such exists.
        """
        if self.model is not None:
            if os.path.exists(self.checkpoints_dir_path):
                with open(os.path.join(self.checkpoints_dir_path,
                                       'checkpoint')) as checkpoint_file:
                    self.model.load(
                        checkpoint_file.readline().split(': ')[-1][1:-2])
                    self.learn_model()
                    self.save_model()
            else:
                self.log_named_warning("checkpoints directory doesn't exist!")
        else:
            self.log_named_warning(
                "can't restore model learning process, because model is None!")
class CowClassifier(object):
    """ Cow classifier """
    def __init__(self):
        """ default constructor """
        # Image
        self.image_size = 32  # 32x32

        # tensorflow network variables
        self.tf_img_prep = None
        self.tf_img_aug = None
        self.tf_network = None
        self.tf_model = None

        # 1: setup image preprocessing
        self.setup_image_preprocessing()

        # 2: setup neural network
        self.setup_nn_network()

    def setup_image_preprocessing(self):
        """ Setup image preprocessing """
        # normalization of images
        self.tf_img_prep = ImagePreprocessing()
        self.tf_img_prep.add_featurewise_zero_center()
        self.tf_img_prep.add_featurewise_stdnorm()

        # Randomly create extra image data by rotating and flipping images
        self.tf_img_aug = ImageAugmentation()
        self.tf_img_aug.add_random_flip_leftright()
        self.tf_img_aug.add_random_rotation(max_angle=30.)

    def setup_nn_network(self):
        """ Setup neural network structure """

        # our input is an image of 32 pixels high and wide with 3 channels (RGB)
        # we will also preprocess and create synthetic images
        self.tf_network = input_data(
            shape=[None, self.image_size, self.image_size, 3],
            data_preprocessing=self.tf_img_prep,
            data_augmentation=self.tf_img_aug)

        # layer 1: convolution layer with 32 filters (each being 3x3x3)
        layer_conv_1 = conv_2d(self.tf_network,
                               32,
                               3,
                               activation='relu',
                               name='conv_1')

        # layer 2: max pooling layer
        self.tf_network = max_pool_2d(layer_conv_1, 2)

        # layer 3: convolution layer with 64 filters
        layer_conv_2 = conv_2d(self.tf_network,
                               64,
                               3,
                               activation='relu',
                               name='conv_2')

        # layer 4: Another convolution layer with 64 filters
        layer_conv_3 = conv_2d(layer_conv_2,
                               64,
                               3,
                               activation='relu',
                               name='conv_3')

        # layer 5: Max pooling layer
        self.tf_network = max_pool_2d(layer_conv_3, 2)

        # layer 6: Fully connected 512 node layer
        self.tf_network = fully_connected(self.tf_network,
                                          512,
                                          activation='relu')

        # layer 7: Dropout layer (removes neurons randomly to combat overfitting)
        self.tf_network = dropout(self.tf_network, 0.5)

        # layer 8: Fully connected layer with two outputs (cow or non cow class)
        self.tf_network = fully_connected(self.tf_network,
                                          2,
                                          activation='softmax')

        # define how we will be training our network
        accuracy = Accuracy(name="Accuracy")
        self.tf_network = regression(self.tf_network,
                                     optimizer='adam',
                                     loss='categorical_crossentropy',
                                     learning_rate=0.0005,
                                     metric=accuracy)

    def load_model(self, model_path):
        """ Load model """
        self.tf_model = DNN(self.tf_network, tensorboard_verbose=0)
        self.tf_model.load(model_path)

    def predict_image(self, image_path):
        """ Predict image """
        # Load the image file
        img = scipy.ndimage.imread(image_path, mode="RGB")

        # Scale it to 32x32
        img = scipy.misc.imresize(img, (32, 32),
                                  interp="bicubic").astype(np.float32,
                                                           casting='unsafe')

        # Predict
        return self.tf_model.predict([img])
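A minimal usage sketch for CowClassifier; both paths below are placeholder assumptions:

# Hedged usage sketch; the model and image paths are placeholders.
classifier = CowClassifier()
classifier.load_model('cow_model.tflearn')
scores = classifier.predict_image('field.jpg')  # one softmax pair per image
print('softmax output: {}'.format(scores[0]))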
Example #15
class Bot:
    def __init__(self):
        self.words = []
        self.labels = []
        self.docs_x = []
        self.docs_y = []
        self.stemmer = LancasterStemmer()
        self.data = []
        self.training = []
        self.output = []
        self.out_empty = []
        self.model = []
        self.count = -1
        self.say = ""
        self.Network = Network()

    def read(self):
        with open("src/models/intents.json") as f:
            self.data=load(f)
    def dump(self):
        with open("src/models/data.pickle", "wb") as f:
            dump((self.words, self.labels, self.training, self.output), f)
    def stem(self):
        for intent in self.data["intents"]:
            for pattern in intent["patterns"]:
                wrds = word_tokenize(pattern)
                self.words.extend(wrds)
                self.docs_x.append(wrds)
                self.docs_y.append(intent["tag"])

            if intent["tag"] not in self.labels:
                self.labels.append(intent["tag"])

        self.words = [self.stemmer.stem(w.lower()) for w in self.words if w != "?"]
        self.words = sorted(list(set(self.words)))
        self.labels = sorted(self.labels)
    def modelsetup(self):
        self.out_empty = [0 for _ in range(len(self.labels))]

        for x, doc in enumerate(self.docs_x):
            bag = []

            wrds = [self.stemmer.stem(w.lower()) for w in doc]

            for w in self.words:
                if w in wrds:
                    bag.append(1)
                else:
                    bag.append(0)

            output_row = self.out_empty[:]
            output_row[self.labels.index(self.docs_y[x])] = 1
            self.training.append(bag)
            self.output.append(output_row)

        self.training = array(self.training)
        self.output = array(self.output)
        self.dump()

    def setup(self):
        ops.reset_default_graph()
        net = input_data(shape=[None, len(self.training[0])])
        net = fully_connected(net, 10)
        net = fully_connected(net, 10)
        net = fully_connected(net, len(self.output[0]), activation="softmax")
        net = regression(net)
        self.model = DNN(net)
        if exists("src/models/model.tflearn.index"):
            self.model.load("src/models/model.tflearn")
        else:
            self.model.fit(self.training, self.output, n_epoch=1000, batch_size=8, show_metric=True)
            self.model.save("src/models/model.tflearn")
    def indexWord(self, x, word):
        x = x.split(" ")
        ch = ""
        for i in x:
            if i.find(word) != -1:
                ch = i
        return ch
    def bag_of_words(self, s, words):
        bag = [0 for _ in range(len(words))]
        translate=[]
        s_words = word_tokenize(s)
        s_words = [self.stemmer.stem(word.lower()) for word in s_words]

        for se in s_words:
            for i, w in enumerate(words):
                if w == se:
                    bag[i] = 1
                if se not in words and se not in translate:
                    translate.append(se)

        return array(bag), translate
    def chat(self, x, ui):
        try:
            self.count += 1
            predinp, translate = self.bag_of_words(x, self.words)
            if translate:
                translate=self.indexWord(str(x),translate[0])
                print(translate)
            results = self.model.predict([predinp])
            results_index = argmax(results)
            tag = self.labels[results_index]
        except Exception as e:
            print(e)
        try:
            if results[0][results_index] > 0.4:
                for tg in self.data["intents"]:
                    if tg['tag'] == tag:
                        responses = tg['responses']
                self.say = choice(responses)
                if self.say == "Looking up":
                    self.say = self.Network.Connect(translate.upper())
                    ui.textEdit.setText(self.say)
                else:
                    ui.textEdit.setText(self.say)
            else:
                self.say="Sorry i can't understand i am still learning try again."
                ui.textEdit.setText(self.say)
        except Exception as e:
            print(e)
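A hedged sketch of wiring the Bot together; FakeUI stands in for the PyQt object that exposes textEdit.setText in the real application:

# Hedged usage sketch; FakeUI is a stand-in for the real UI object.
class FakeUI:
    class textEdit:
        @staticmethod
        def setText(text):
            print("bot:", text)

bot = Bot()
bot.read()        # load intents.json
bot.stem()        # tokenize and stem the training patterns
bot.modelsetup()  # build the bag-of-words training arrays
bot.setup()       # train the tflearn model or load a saved one
bot.chat("hello", FakeUI())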
Example #16
def foo(img_fn, model_fn='../data/model/model_weights'):
    """Locate a licence plate in the image and return the recognized text."""
    img = cv2.imread(img_fn, cv2.IMREAD_GRAYSCALE)

    haar_fn = '../data/haarcascade_russian_plate_number.xml'
    haar = cv2.CascadeClassifier(haar_fn)
    detected = haar.detectMultiScale(img)
    plates = []
    for x, y, w, h in detected:
        obj = img[y:y + h, x:x + w]
        plates.append(obj)

    chars = plates[0] < filters.threshold_minimum(plates[0])

    labeled_chars, a = ndi.label(chars)
    labeled_chars = (labeled_chars > 1).astype(np.int8)

    c = measure.find_contours(labeled_chars, .1)

    letters = []
    for i, v in enumerate(c):
        xs, ys = zip(*v)
        x = int(min(xs))
        y = int(min(ys))
        w = int(max(xs) - x + 2)
        h = int(max(ys) - y + 2)
        if w < 15:
            continue
        letters.append((y, x, h, w))

    letters = sorted(letters)

    letters_img = [plates[0][x:x + w, y:y + h] for y, x, h, w in letters]

    letters_img = [i for i in letters_img if i[0, 0] > 127]

    sizes = [image.size for image in letters_img]
    median = np.median(sizes)
    allowed_size = median + median / 4

    letters_img = [image for image in letters_img if image.size < allowed_size]

    size = 64

    normalized_img = []
    for i in letters_img:
        ratio = i.shape[0] / i.shape[1]
        img1 = transform.resize(i, [size, int(size / ratio)], mode='constant')
        width = img1.shape[1]
        missing = (size - width) // 2
        ones = np.ones([size, missing])
        img2 = np.append(ones, img1, 1)
        img3 = np.append(img2, ones, 1)
        if 2 * missing + width != size:
            one = np.ones([size, 1])
            img4 = np.append(img3, one, 1)
        else:
            img4 = img3
        normalized_img.append(img4 * 255)

    net_input = input_data(shape=[None, 64, 64, 1])

    conv1 = conv_2d(net_input,
                    nb_filter=4,
                    filter_size=5,
                    strides=[1, 1, 1, 1],
                    activation='relu')
    max_pool1 = max_pool_2d(conv1, kernel_size=2)

    conv2 = conv_2d(max_pool1,
                    nb_filter=8,
                    filter_size=5,
                    strides=[1, 2, 2, 1],
                    activation='relu')
    max_pool2 = max_pool_2d(conv2, kernel_size=2)

    conv3 = conv_2d(max_pool2,
                    nb_filter=12,
                    filter_size=4,
                    strides=[1, 1, 1, 1],
                    activation='relu')
    max_pool3 = max_pool_2d(conv3, kernel_size=2)

    fc1 = fully_connected(max_pool3, n_units=200, activation='relu')
    drop1 = dropout(fc1, keep_prob=.5)

    fc2 = fully_connected(drop1, n_units=36, activation='softmax')
    net = regression(fc2)

    model = DNN(network=net)
    model.load(model_file=model_fn)

    labels = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')

    predicted = []
    for i in normalized_img:
        y = model.predict(i.reshape([1, 64, 64, 1]))
        y_pred = np.argmax(y[0])
        predicted.append(labels[y_pred])

    return ''.join(predicted)
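A hedged usage sketch for the plate reader above; 'car.jpg' is a placeholder path, and the trained weights are expected at the default model_fn location:

# Hedged usage sketch; 'car.jpg' is a placeholder.
if __name__ == '__main__':
    print('recognized plate:', foo('car.jpg'))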