Example 1
    def __getitem__(self, index):
        image_info = self._get_image_info(index)
        image = imread(image_info['path'], rootpath=self.rootpath)
        if self.transform is not None:
            # albumentations-style call: keyword argument in, dict with an 'image' key out
            image = self.transform(image=image)['image']

        if self.mode == 'infer':
            # inference mode: return sample identifiers instead of the label
            return image_info['id'], image_info['frame'], image
        return image, image_info['label']
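
For context, such a __getitem__ is usually consumed through a PyTorch DataLoader. The sketch below is hypothetical: the dataset constructor is not shown in the snippet, so the instance is assumed to be built elsewhere.

from torch.utils.data import DataLoader

# 'dataset' is assumed to be an instance of the class above, built elsewhere.
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
for images, labels in loader:  # outside 'infer' mode each item is an (image, label) pair
    pass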
Example 2
def prepare_lbp_dataset(dirpath: str, features_npy: str, targets_csv: str, verbose: bool = True):
    feature_extractor = LBPFeatureExtractor(n_features=59, crop_size=(64, 64))

    features, targets = [], []
    df = find_items(in_dir=dirpath)
    for idx, row in tqdm(df.iterrows(), total=df.shape[0], disable=not verbose):
        image = imread(row['path'], rootpath=dirpath)
        features.append(feature_extractor(image))
        targets.append(row)

    np.save(features_npy, np.stack(features, axis=0))
    pd.DataFrame(targets).to_csv(targets_csv, index=False)
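
A hypothetical driver call for the function above could look like the following; the directory and file names are illustrative and not taken from the snippet.

prepare_lbp_dataset(
    dirpath="data/faces",             # illustrative root folder scanned by find_items
    features_npy="lbp_features.npy",  # output: stacked LBP feature vectors, one row per image
    targets_csv="lbp_targets.csv",    # output: the metadata rows collected for each image
    verbose=True,
)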
Example 3
def test_imread():
    """
    Tests imread functionality
    """
    jpg_rgb_uri = (
        "https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master"
        "/test_images/catalyst_icon.jpg")
    jpg_grs_uri = (
        "https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master"
        "/test_images/catalyst_icon_grayscale.jpg")
    png_rgb_uri = (
        "https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master"
        "/test_images/catalyst_icon.png")
    png_grs_uri = (
        "https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master"
        "/test_images/catalyst_icon_grayscale.png")

    for uri in [jpg_rgb_uri, jpg_grs_uri, png_rgb_uri, png_grs_uri]:
        img = imread(uri)
        assert img.shape == (400, 400, 3)
        img = imread(uri, grayscale=True)
        assert img.shape == (400, 400, 1)
Example 4
def prepare_cutout_datasets(in_dir: str, out_dir_crops: str, out_dir_cutout: str, verbose: bool = False):
    df = find_items(in_dir=in_dir)
    face_detector = FaceDetectionEngine(weights_path='./models/s3fd_convert.pth')
    for idx, row in tqdm(df.iterrows(), total=df.shape[0], disable=not verbose):
        image = imread(row['path'], rootpath=in_dir)
        face_crops, cutout_faces = crop_faces(image, face_detector)

        os.makedirs(os.path.join(out_dir_crops, os.path.dirname(row['path'])), exist_ok=True)
        for i, (crop, bbox) in enumerate(face_crops):
            root, ext = os.path.splitext(row['path'])
            cv2.imwrite(os.path.join(out_dir_crops, f'{root}_{i}{ext}'), crop[:, :, ::-1])  # RGB -> BGR

        os.makedirs(os.path.join(out_dir_cutout, os.path.dirname(row['path'])), exist_ok=True)
        cv2.imwrite(os.path.join(out_dir_cutout, row['path']), cutout_faces[:, :, ::-1])  # RGB -> BGR
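
As with the previous example, a hypothetical invocation; the paths are illustrative, and the s3fd weights file must exist at the location hard-coded in the function.

prepare_cutout_datasets(
    in_dir="data/frames",           # illustrative input root scanned by find_items
    out_dir_crops="data/crops",     # receives per-face crops, mirroring the input folder layout
    out_dir_cutout="data/cutout",   # receives the cutout images returned by crop_faces
    verbose=True,
)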
Example 5
    def __call__(self, row):
        """Reads a row from your annotations dict with filename and
        transfer it to an image

        Args:
            row: elem in your dataset.

        Returns:
            np.ndarray: Image
        """
        image_name = str(row[self.input_key])
        img = imread(
            image_name, rootpath=self.datapath, grayscale=self.grayscale
        )

        result = {self.output_key: img}
        return result
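
A minimal usage sketch for this reader. The class name ImageReader, its constructor signature, and the key names below are assumptions made only to exercise the attributes referenced in __call__ (input_key, output_key, datapath, grayscale).

# Hypothetical construction; the real constructor may differ.
reader = ImageReader(
    input_key="filepath", output_key="image",
    datapath="data/images", grayscale=False,
)
sample = reader({"filepath": "cats/0001.jpg", "label": 0})
image = sample["image"]  # np.ndarray loaded by imread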
Example 6
def predict_sample(image_info, model, rootpath: str = None):
    print('predict')
    image = imread(image_info['path'], rootpath=rootpath)
    probability = model.predict(image)

    return image_info['id'], image_info['frame'], probability
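
A self-contained sketch of calling predict_sample; the stub model and the image_info fields are hypothetical and chosen only to match the keys accessed inside the function.

class StubModel:
    def predict(self, image):
        # placeholder: a real model would return a probability for the frame
        return 0.5

info = {"id": "video_01", "frame": 42, "path": "video_01/frame_000042.jpg"}
sample_id, frame_idx, probability = predict_sample(info, model=StubModel(), rootpath="data/frames")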