Beispiel #1
0
def register(user_id):
    """Register a user in the face bank.

    Loads the user's raw images from ``data/dataset/<user_id>``, aligns
    each detected face with MTCNN and writes the crops to
    ``data/facebank/<user_id>``, then rebuilds the facebank embeddings
    so the new user becomes recognizable.

    :param user_id: identifier of the user; used as the folder name and
        the saved-image filename prefix.
    :return: dict with ``_result`` / ``_message`` describing the outcome.
    """
    data_path = Path('data')
    save_path = data_path / 'facebank' / user_id
    fetch_path = data_path / 'dataset' / user_id
    images = load_images_from_folder(fetch_path)
    print(images)
    # parents/exist_ok: create the whole facebank hierarchy if missing
    save_path.mkdir(parents=True, exist_ok=True)

    mtcnn = MTCNN()
    count = 0
    # Default outcome; replaced with success only when every image
    # yielded an aligned face (same condition as the original code).
    result = {
        "_result": "Error",
        "_message": "Unable to detect the face"
    }
    for frame in images:
        # OpenCV delivers BGR; PIL expects RGB, hence the channel flip.
        pil_image = Image.fromarray(frame[..., ::-1])
        try:
            # Flip back to BGR so cv2.imwrite stores correct colors.
            warped_face = np.array(mtcnn.align(pil_image))[..., ::-1]
        except Exception:
            # MTCNN raises when no face is found; skip this image and
            # keep processing the rest (was a silent bare `except:`).
            continue
        cv2.imwrite(str(save_path / f"{user_id}_{count}.jpg"), warped_face)
        count += 1
    if count == len(images):
        result = {
            "_result": "success",
            "_message": "User Registered Successfully"
        }

    conf = get_config(False)
    learner = face_learner(conf, True)
    learner.load_state(conf, 'cpu_final.pth', True, True)
    learner.model.eval()
    # Refresh the stored embeddings so the new user is picked up.
    targets, names = prepare_facebank(conf, learner.model, mtcnn, user_id)
    return result
Beispiel #2
0
def evaluate_model(data_frame, process_data=False):
    """Evaluate the saved model on the processed image folder.

    :param data_frame: dataframe describing the raw dataset; only used
        when ``process_data`` is True to (re)build the image folders.
    :param process_data: when True, regenerate the processed folders
        from ``config.DATA_PATH`` before evaluating.
    :return: result of ``keras.Model.evaluate`` (loss / metric values).
    """
    if process_data:
        create_image_folders(data_frame,
                             data_dir=config.DATA_PATH,
                             output_dir=config.PROCESSED_DATA_PATH)

    images, labels = load_images_from_folder(config.PROCESSED_DATA_PATH)

    with open(config.ENCODER_PATH, "rb") as f:
        encoder = pickle.load(f)

    # The encoder was fitted at training time and pickled; use
    # transform() (not fit_transform) so the label -> int mapping is
    # exactly the one the model was trained against.
    encoded_y = encoder.transform(labels)

    model = keras.models.load_model(config.MODEL_PATH)
    print(model.summary())
    return model.evaluate(images, encoded_y, batch_size=16)
Beispiel #3
0
    # Run options pulled from the parsed CLI argument dict.
    sigma = args['sigma']
    num_pretrain = args['num_pretrain']
    svideo = args['svideo']
    sframe = args['sframe']

    # Create the output folder if it does not exist yet.
    out_path = './' + output_folder + '/'
    if not os.path.exists(out_path):
        os.mkdir(out_path)
    # Text file that will receive the MOSSE tracker's center positions.
    mosse_txt_name = out_path + video_name + "_centerPos_mosse.txt"
    fm = open(mosse_txt_name, 'w')

    # Load shadow masks for this video.
    # NOTE(review): presumably one mask image per frame — confirm against
    # how `masks` is consumed further down.
    mask_path = './' + video_name + '_mask/'
    masks = load_images_from_folder(mask_path)

    # Open the input video for frame-by-frame processing.
    cap = cv2.VideoCapture(v_name)

    # Read the ground-truth box position for the first frame:
    # "<video_name>.txt" holds one line "x,y,w,h".
    f = open(video_name + ".txt", "r")
    line = f.readline().split(',')
    ix = int(line[0])
    iy = int(line[1])
    w = int(line[2])
    h = int(line[3])
    f.close()

    initTracking = True
Beispiel #4
0
    def training_init(self, dataset_dir="dataset", batch_size=8):
        """Initialize the parameters, data sources and model.

        :param dataset_dir: File path to dataset directory. With folders system:
            > train
                > data
                > masks
            > valid
                > data
                > masks
        :param batch_size: Number of images in one training iteration
        """
        self.training_init_check = True
        self.batch_size = batch_size

        if self.use_datagenerator:
            # Augmentations for input images (rescaled to [0, 1]).
            data_datagen = ImageDataGenerator(
                rescale=1. / 255,
                height_shift_range=0.2,
                horizontal_flip=True,
                rotation_range=45,
                width_shift_range=0.2,
                zoom_range=[0.5, 1.0]
            )

            # Masks get the same geometric augmentations but no rescaling.
            masks_datagen = ImageDataGenerator(
                height_shift_range=0.2,
                horizontal_flip=True,
                rotation_range=45,
                width_shift_range=0.2,
                zoom_range=[0.5, 1.0]
            )

            # Same seed on both generators keeps image/mask pairs aligned.
            seed = 909
            image_generator = data_datagen.flow_from_directory(
                os.path.join(dataset_dir, "train", "data"),
                class_mode=None, seed=seed, batch_size=batch_size,
                target_size=self.dim)
            masks_generator = masks_datagen.flow_from_directory(
                os.path.join(dataset_dir, "train", "masks"),
                class_mode=None, seed=seed, batch_size=batch_size,
                target_size=self.dim, color_mode="grayscale")

            self.train_generator = zip(image_generator, masks_generator)

            if os.path.exists(os.path.join(dataset_dir, "valid")):
                valid_datagen = ImageDataGenerator(rescale=1. / 255)

                # Bug fix: the validation *image* generator previously read
                # from .../valid/masks/ (copy-paste), zipping masks with masks.
                valid_image_generator = valid_datagen.flow_from_directory(
                    os.path.join(dataset_dir, "valid", "data"),
                    class_mode=None, seed=seed)
                # NOTE(review): unlike the train branch, validation masks are
                # loaded in color and rescaled — confirm this is intended.
                valid_masks_generator = valid_datagen.flow_from_directory(
                    os.path.join(dataset_dir, "valid", "masks"),
                    class_mode=None, seed=seed)

                self.valid_generator = zip(valid_image_generator,
                                           valid_masks_generator)
        else:
            x_train = load_images_from_folder(
                os.path.join(dataset_dir, "train", "data", "img"), self.shape)
            y_train = load_images_from_folder(
                os.path.join(dataset_dir, "train", "masks", "img"),
                self.dim + (1,))  # TODO: Add Y num_channels variable to init

            self.train_data = (x_train, y_train)

            if os.path.exists(os.path.join(dataset_dir, "valid", "img")):
                x_valid = load_images_from_folder(
                    os.path.join(dataset_dir, "valid", "data", "img"),
                    self.shape)
                y_valid = load_images_from_folder(
                    os.path.join(dataset_dir, "valid", "masks", "img"),
                    self.dim + (1,))

                self.valid_data = (x_valid, y_valid)

        # Xception backbone pre-trained on ImageNet, without the classifier
        # head, feeding a residual decoder.
        base_model = keras.applications.xception.Xception(
            weights='imagenet', include_top=False, input_shape=self.shape)

        output_layer = residual_model(base_model, dropout_rate=0.5)

        self.model = Model(base_model.input, output_layer)
Beispiel #5
0
        # Keep an untouched copy of the current video frame.
        orig_img = frame.copy()
        # img = cv.pyrUp(img)
        # Binarize/dilate the frame so grid contours stand out.
        dilated = utils.basic_preprocess(frame)
        # Contours
        contours = utils.find_contours(dilated)
        if len(contours) > 0:
            # Locate the sudoku grid and cut out its cells/digits.
            img, crop_rect = utils.draw_corners(dilated, contours)
            img = utils.find_grid(img, crop_rect)
            digits, numbers_descriptors, squares = utils.extract_digits(
                img, True)
            cv.imshow('img before', img)

            # if count % 15 == 0:

            # Template-match against a reference grid image to decide
            # whether this frame actually contains a sudoku board.
            templ = cv.imread('polished.jpg', 0)
            images = utils.load_images_from_folder()
            result = []
            match = digit_detection.flann_matcher(img, templ, False)
            if match > 30:
                print(count)

            # ESC pressed: classify the digits and solve the board.
            if cv.waitKey(40) == 27:

                board = utils.create_board_knn(digits, squares)
                to_solve = np.zeros(img.shape)
                board, to_solve, result = sudoku_solver.solve_sudoku(
                    board, to_solve)
                print(to_solve)
                cv.imshow('img after', to_solve)

                # NOTE(review): snippet is truncated here — the body of
                # this `if` is outside the visible source.
                if result:
Beispiel #6
0
]

# Initialize a list of colors to represent each possible label.
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")

# Build the paths to the model weights and configuration files.
weights_path = os.path.sep.join([args["yolo"], "yolov3.weights"])
config_path = os.path.sep.join([args["yolo"], "yolov3.cfg"])

# Load the YOLO object detector.
print('loading YOLO...')
net = cv.dnn.readNetFromDarknet(config_path, weights_path)

# Load the input images.
images = load_images_from_folder(args["image"])
# office_labels = set()
images_labels = []

# Iterate over the images in the directory.
for image in images:
    (h, w) = image.shape[:2]

    # Keep only the (unconnected) output layers of the network.
    ln = net.getLayerNames()
    # NOTE(review): the `i[0]` indexing assumes an older OpenCV where
    # getUnconnectedOutLayers() returns nested arrays; newer versions
    # return flat indices — verify against the installed cv version.
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    # Build a blob from the input image, then run YOLO's forward pass
    # to obtain bounding boxes and class probabilities.
    # NOTE(review): this snippet is truncated mid-call below.
    blob = cv.dnn.blobFromImage(image,
                                1 / 255.0, (416, 416),
    # Tail of an argument-parser builder (its `def` and the creation of
    # `arg` are outside the visible source).
    # -i/--image_path: path to the input image to run on.
    arg.add_argument('-i',
                     '--image_path',
                     help='link to image',
                     default='./images/1.jpg')

    return arg.parse_args()


# Get the parameters passed on the command line.
args = get_arguments()

# Initialize the model.
model = E2E()

# Read all the test images.
all_imgs = utils.load_images_from_folder(
    "D:/COMPUTER-VISION-PROJECT-WITH-CODE/Dataset/VNLP_test")

# # Run the model on the entire test-image set
# for i in range(len(all_imgs)):
#     # Predict; returns the annotated image and the predicted value
#     image, lpnumber = model.predict(all_imgs[i])
#
#     # Print to the console to inspect the result
#     print(lpnumber)
#
#     # Display the predicted image
#     cv2.imshow('Result', image)
#     cv2.waitKey(0)
folder = "D:/COMPUTER-VISION-PROJECT-WITH-CODE/Dataset/VNLP_test"
data = {'Filename': [], 'Predict': []}
# NOTE(review): snippet truncated — the body of this loop is outside the
# visible source.
for filename in os.listdir(folder):
Beispiel #8
0
 def __init__(self, root, transform=None):
     """Image-folder dataset: eagerly load every image under *root*
     and remember an optional per-item transform."""
     self.transform = transform
     self.imgs = load_images_from_folder(root)