Example no. 1
import numpy as np
import tensorflow as tf

# SimpleModel, load_dataset, image_augmentation and split_data are assumed to be
# defined elsewhere in the surrounding project.


def main(FLAG):
    # Build the model with an RMSProp optimizer at the configured learning rate.
    Model = SimpleModel(FLAG.input_dim,
                        FLAG.hidden_dim,
                        FLAG.output_dim,
                        optimizer=tf.train.RMSPropOptimizer(
                            FLAG.learning_rate))

    # Load the dataset and augment it with horizontal flips and brightness changes.
    image, label = load_dataset()
    image, label = image_augmentation(image,
                                      label,
                                      horizon_flip=True,
                                      control_brightness=True)
    # Normalise the keypoint labels to roughly [0, 1] by dividing by 96
    # (presumably the image side length).
    label = label / 96.
    (train_X, train_y), (valid_X,
                         valid_y), (test_X, test_y) = split_data(image, label)

    if FLAG.Mode == "validation":
        # Random search over 20 learning rates sampled log-uniformly from 1e-6 to 1e-2.
        lr_list = 10**np.random.uniform(-6, -2, 20)
        Model.validation(train_X, train_y, valid_X, valid_y, lr_list)
    elif FLAG.Mode == "train":
        Model.train(train_X, train_y, valid_X, valid_y, FLAG.batch_size,
                    FLAG.Epoch, FLAG.save_graph, FLAG.save_model)

        # Sanity check: predict a single test sample and report its mean squared error.
        pred_Y = Model.predict(test_X[123])
        print(pred_Y)
        print(test_y[123])
        print(np.mean(np.square(pred_Y - test_y[123])))
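The snippet never shows how the FLAG object is created. A minimal sketch, assuming a plain argparse namespace with the attribute names that main() reads; the default values below are placeholders, not values from the original project:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--input_dim", type=int, default=9216)       # e.g. 96 * 96 flattened grayscale pixels
parser.add_argument("--hidden_dim", type=int, default=100)
parser.add_argument("--output_dim", type=int, default=30)        # e.g. 15 (x, y) keypoints
parser.add_argument("--learning_rate", type=float, default=1e-3)
parser.add_argument("--Mode", choices=["train", "validation"], default="train")
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--Epoch", type=int, default=100)
parser.add_argument("--save_graph", action="store_true")
parser.add_argument("--save_model", action="store_true")

if __name__ == "__main__":
    main(parser.parse_args())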
Example no. 2
import sys

import cv2
import tensorflow as tf
from scipy.misc import imresize  # assumed source; removed in newer SciPy (cv2.resize is a close replacement)

# SimpleModel and draw_features_point_on_image are assumed to come from the surrounding project.


def main(FLAG):
    # Build the model on the CPU with an RMSProp optimizer.
    Model = SimpleModel(FLAG.input_dim,
                        FLAG.hidden_dim,
                        FLAG.output_dim,
                        optimizer=tf.train.RMSPropOptimizer(
                            FLAG.learning_rate),
                        using_gpu=False)

    # The input image path is given on the command line; the Haar cascade for
    # frontal-face detection is expected next to the script.
    image_path = sys.argv[1]
    cascPath = "./haarcascade_frontalface_default.xml"

    faceCascade = cv2.CascadeClassifier(cascPath)

    # Read the image and convert it to grayscale for detection and prediction.
    image = cv2.imread(image_path)
    src_height, src_width, src_channels = image.shape
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Detect faces in the grayscale image with the Haar cascade.
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(30, 30),
                                         flags=cv2.CASCADE_SCALE_IMAGE)

    for x, y, w, h in faces:
        print("faceLocation : ({},{}), width={}, height={}".format(x, y, w, h))
        # NumPy images are indexed [row, col], so slice rows with y and columns with x.
        cropped_image = gray[y:y + h, x:x + w]
        # Resize the face crop to the model's input size and normalise pixels to [0, 1].
        resized_image = imresize(cropped_image, (FLAG.Width, FLAG.Height))
        resized_image = resized_image.flatten() / 255

        # The predicted keypoints are relative to the crop; map them back to
        # source-image pixels (even indices are x, odd indices are y).
        pred_feature = Model.predict(resized_image).flatten()
        pred_feature[::2] = pred_feature[::2] * w + x
        pred_feature[1::2] = pred_feature[1::2] * h + y

    # Draw the keypoints of the last detected face (pred_feature holds the values
    # from the final loop iteration) on top of the original image.
    result_img = draw_features_point_on_image(image, [pred_feature], src_width,
                                              src_height)
    print(pred_feature)
    # Outline every detected face with a green rectangle.
    for (x, y, w, h) in faces:
        cv2.rectangle(result_img, (x, y), (x + w, y + h), (0, 255, 0), 1)

    cv2.imshow('Result', result_img)
    cv2.imwrite("./result_img.png", result_img)
    cv2.waitKey(0)

    cv2.destroyAllWindows()
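The core of Example no. 2 is the rescaling of pred_feature: the model predicts keypoints normalised to the face crop (consistent with the label / 96. scaling in Example no. 1), and they are mapped back to source-image pixels by scaling with the crop's width and height and shifting by its top-left corner. A standalone sketch of that mapping; the function name and test values are illustrative, not part of the original code:

import numpy as np

def denormalize_keypoints(pred_feature, x, y, w, h):
    # pred_feature interleaves coordinates as [x0, y0, x1, y1, ...], matching the
    # even/odd slicing used above.
    points = np.asarray(pred_feature, dtype=np.float64).copy()
    points[::2] = points[::2] * w + x    # x coordinates: scale by crop width, shift by left edge
    points[1::2] = points[1::2] * h + y  # y coordinates: scale by crop height, shift by top edge
    return points

# A keypoint at the centre of a 100x80 crop whose top-left corner is (20, 30)
# lands at pixel (70, 70) in the source image.
print(denormalize_keypoints([0.5, 0.5], x=20, y=30, w=100, h=80))  # [70. 70.]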