Example 1
def make_masks(image):
    #hog_masks = HOG.make_feature_mask(image)
    #hog_masks_shape = hog_masks.shape
    #print(hog_masks.shape)
    #hog_masks = hog_masks.reshape((hog_masks.shape[0] * hog_masks.shape[1], hog_masks.shape[2]))
    #hog_masks_min = np.min(hog_masks, 0)
    #hog_masks_max = np.max(hog_masks, 0)
    #print('HoG Min[%s] Max[%s]' % (np.min(hog_masks_min), np.max(hog_masks_max)))
    #print(hog_masks.shape)
    #hog_masks = np.array(255 * np.clip(hog_masks, 0, 1), dtype=np.uint8)
    #hog_masks,
    #LBP.make_feature_mask(image_gray,
    #pool_radius=3),
    image = imfeat.convert_image(image, [{
        'type': 'numpy',
        'mode': 'bgr',
        'dtype': 'uint8'
    }])
    image_gray = imfeat.convert_image(image, [{
        'type': 'numpy',
        'mode': 'gray',
        'dtype': 'uint8'
    }])
    image_gradient = np.array(
        GRAD.make_feature_mask(np.array(image_gray, dtype=np.float32) / 255.) *
        255,
        dtype=np.uint8)
    image_lab = imfeat.convert_image(image, [{
        'type': 'numpy',
        'mode': 'lab',
        'dtype': 'uint8'
    }])
    return np.ascontiguousarray(np.dstack([image, image_lab, image_gradient]),
                                dtype=np.uint8)
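make_masks stacks the BGR, CIELAB, and gradient channels of the same image into one contiguous uint8 array. A minimal usage sketch, assuming GRAD is a module-level imfeat.GradientHistogram() instance (as the GradientHistogram examples below suggest) and that the lena test image from the other examples exists on disk:

import cv2
import imfeat

GRAD = imfeat.GradientHistogram()  # assumed module-level helper used by make_masks

# make_masks as defined above is assumed to be in scope
image = cv2.imread('test_images/lena.jpg')   # BGR uint8 array
masks = make_masks(image)
print(masks.shape)                           # BGR + LAB + gradient channels
print(masks.dtype)                           # uint8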
Example 2
 def test_name(self):
     type_channel_modes = [('opencv', 'bgr', 'uint8'), ('pil', 'rgb', 'uint8'), ('numpy', 'bgr', 'uint8'),
                           ('opencv', 'bgr', 'float32'), ('numpy', 'bgr', 'float32')]
     to_np8 = lambda x: imfeat.convert_image(x, {'type': 'numpy', 'dtype': 'uint8', 'mode': 'bgr'})
     to_np32 = lambda x: imfeat.convert_image(x, {'type': 'numpy', 'dtype': 'float32', 'mode': 'bgr'})
     for fn in ['lena.jpg', 'lena.pgm', 'lena.ppm']:
         image_np8 = None
         image_np32 = None
         for i in load_images(fn):
             if image_np8 is None:
                 image_np8 = to_np8(i)
                 image_np32 = to_np32(i)
                 np.testing.assert_equal(image_np8, np.array(image_np32 * 255, dtype=np.uint8))
             for t, c, m in type_channel_modes:
                 cur_img = imfeat.convert_image(i, {'type': t, 'dtype': m, 'mode': c})
                 if t == 'opencv':
                     self.assertTrue(isinstance(cur_img, cv.iplimage))
                 elif t == 'pil':
                     self.assertTrue(Image.isImageType(cur_img))
                 else:
                     self.assertTrue(isinstance(cur_img, np.ndarray))
                 if m == 'uint8':
                     np.testing.assert_equal(image_np8, to_np8(cur_img))
                 else:
                     np.testing.assert_equal(image_np32, to_np32(cur_img))
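The load_images helper used by this test is not shown here; a purely hypothetical sketch of such a helper, yielding the same file in each of the container types the test exercises (old-style cv, PIL, and numpy), could look like this:

import cv    # old OpenCV bindings, as used in these examples
import cv2
import Image  # PIL, old-style import as used above

def load_images(fn):
    # Hypothetical helper, not the project's actual implementation.
    path = 'test_images/' + fn
    yield cv.LoadImage(path)    # cv.iplimage
    yield Image.open(path)      # PIL image
    yield cv2.imread(path)      # numpy BGR uint8 array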
Example 3
 def test_name(self):
     type_channel_modes = [('opencv', 'bgr', 'uint8'), ('pil', 'rgb', 'uint8'), ('numpy', 'bgr', 'uint8'),
                           ('opencv', 'bgr', 'float32'), ('numpy', 'bgr', 'float32')]
     to_np8 = lambda x: imfeat.convert_image(x, {'type': 'numpy', 'dtype': 'uint8', 'mode': 'bgr'})
     to_np32 = lambda x: imfeat.convert_image(x, {'type': 'numpy', 'dtype': 'float32', 'mode': 'bgr'})
     for fn in ['lena.jpg', 'lena.pgm', 'lena.ppm']:
         image_np8 = None
         image_np32 = None
         for i in load_images(fn):
             if image_np8 is None:
                 image_np8 = to_np8(i)
                 image_np32 = to_np32(i)
                 np.testing.assert_equal(image_np8, np.array(image_np32 * 255, dtype=np.uint8))
             for t, c, m in type_channel_modes:
                 cur_img = imfeat.convert_image(i, {'type': t, 'dtype': m, 'mode': c})
                 if t == 'opencv':
                     self.assertTrue(isinstance(cur_img, cv.iplimage))
                 elif t == 'pil':
                     self.assertTrue(Image.isImageType(cur_img))
                 else:
                     self.assertTrue(isinstance(cur_img, np.ndarray))
                 if m == 'uint8':
                     np.testing.assert_equal(image_np8, to_np8(cur_img))
                 else:
                     np.testing.assert_equal(image_np32, to_np32(cur_img))
Example 4
    def map(self, image_hash, image_data):
        """

        Args:
            image_hash: Unique image string
            image_data: Binary image data

        Yields:
            A tuple in the form of ((image_hash, sim), (pred, image_data))
            image_hash: Unique image string
            sim: Block coordinates/transform from the block generator
            pred: Dictionary mapping classifier name to its prediction
            image_data: JPEG-encoded image block
        """
        try:
            image = Image.open(StringIO.StringIO(image_data))
        except:
            hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
            return
        bgen = imfeat.BlockGenerator(image,
                                     imfeat.CoordGeneratorRectRotate,
                                     output_size=(self._image_height,
                                                  self._image_width),
                                     step_delta=(self._image_height / 2,
                                                 self._image_width / 2),
                                     angle_steps=1)
        for num, (image_out, sim) in enumerate(bgen):
            feature = np.asfarray(imfeat.compute(self._feat, image_out)[0])
            pred = dict((classifier_name, classifier.predict(feature))
                        for classifier_name, classifier in self._classifiers)
            if any(x for x in pred.values() if x[0][0] *
                   x[0][1] > 0):  # At least 1 class needs to be > 0
                image_out_fp = StringIO.StringIO()
                imfeat.convert_image(image_out,
                                     ['RGB']).save(image_out_fp, 'JPEG')
                image_out_fp.seek(0)
                yield (image_hash, sim), (pred, image_out_fp.read())
Example 5
    def map(self, image_hash, image_data):
        """

        Args:
            image_hash: Unique image string
            image_data: Binary image data

        Yields:
            A tuple in the form of ((image_hash, sim), (pred, image_data))
            image_hash: Unique image string
            sim: Block coordinates/transform from the block generator
            pred: Dictionary mapping classifier name to its prediction
            image_data: JPEG-encoded image block
        """
        try:
            image = Image.open(StringIO.StringIO(image_data))
        except:
            hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
            return
        bgen = imfeat.BlockGenerator(image, imfeat.CoordGeneratorRectRotate,
                                     output_size=(self._image_height, self._image_width),
                                     step_delta=(self._image_height / 2, self._image_width / 2), angle_steps=1)
        for num, (image_out, sim) in enumerate(bgen):
            feature = np.asfarray(imfeat.compute(self._feat, image_out)[0])
            pred = dict((classifier_name, classifier.predict(feature))
                        for classifier_name, classifier in self._classifiers)
            if any(x for x in pred.values() if x[0][0] * x[0][1] > 0):  # At least 1 class needs to be > 0
                image_out_fp = StringIO.StringIO()
                imfeat.convert_image(image_out, ['RGB']).save(image_out_fp, 'JPEG')
                image_out_fp.seek(0)
                yield (image_hash, sim), (pred, image_out_fp.read())
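These map methods belong to hadoopy job classes; only the mapper is shown, so the class name below is a placeholder. A typical driver stanza for such a job, sketched under that assumption:

import hadoopy

if __name__ == '__main__':
    # hadoopy instantiates the class and calls .map(key, value) on each input pair
    hadoopy.run(Mapper)  # 'Mapper' is a placeholder for the class that owns map()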
Example 6
    def _test_convert(self, rgb_func, gray_func, is_rgb):
        for image_name in image_names:
            try:
                image = rgb_func(image_name)
                image_gray = gray_func(image_name)
            except IOError:
                # OpenCV doesn't like reading in animated gifs,
                # or palette gifs
                continue
            for mode in modes:
                try:
                    # Explicitly ignore the case of converting Gray to RGB
                    if not is_rgb and not (mode == "L" or "gray" in mode):
                        continue

                    # Use either the gray or the RGB version as input based
                    # on the parameter
                    if is_rgb:
                        actual = imfeat.convert_image(image, [mode])
                    else:
                        actual = imfeat.convert_image(image_gray, [mode])

                    # Compare against the gray or RGB input depending on
                    # the output mode
                    if mode == "L" or "gray" in mode:
                        self._assert_close_rgb(actual, image_gray)
                    else:
                        self._assert_close_rgb(actual, image)
                except:
                    print "Failed case: (convert to %s) %s [input %s]" % (mode, image_name, "rgb" if is_rgb else "gray")
                    raise
Example 7
def save_display_images(path_hdfs, path_local, min_count,
                        max_count, key_to_path=None):
    """
    Saves the first max_count images obtained by calling
    hadoopy.readtb(path_hdfs).  Each item in the sequence is assumed
    to be of the form (key, (imagedata, boxes)).  The boxes are
    drawn on each image before it is saved to the local path.
    If key_to_path is provided, which maps a key to a path, the image
    corresponding to that key will be saved in key_to_path[key].
    """
    if key_to_path is None:
        key_to_path = {}
    count = 0
    for k, (i, bs) in hadoopy.readtb(path_hdfs):
        if count >= min_count:
            if k in key_to_path:
                path = key_to_path[k]
            else:
                path = path_local
            filename = '%s/%s.jpg' % (path, k)
            im = imfeat.convert_image(Image.open(StringIO.StringIO(i)),
                                      [('opencv', 'bgr', 8)])
            print(k)
            for b in bs:
                cv.Rectangle(im, (b[0], b[1]), (b[2], b[3]),
                             cv.CV_RGB(255, 0, 0), 3)
            cv.SaveImage(filename, im)
        # update count and break loop if necessary
        # TODO(Vlad): can we use slice notation on a list of generators?
        count += 1
        if count > max_count:
            break
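A hypothetical invocation (the HDFS and local paths are placeholders, not from the source); it writes at most max_count JPEGs with red boxes drawn on them:

save_display_images('/user/example/image_boxes', 'out/display',
                    min_count=0, max_count=100)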
Example 8
 def test_transparent_png(self):
     """Transparent PNGs read in by PIL (mode RGBA) aren't supported
     """
     image = Image.open("test_images/transparent.png")
     assert image.mode == "RGBA"
     mode = ("opencv", "bgr", cv.IPL_DEPTH_8U)
     actual = imfeat.convert_image(image, [mode])
     self._assert_close_rgb(actual, image)
Example 9
 def test_save_lena(self):
     feat = imfeat.GradientHistogram()
     out = feat.make_feature_mask(imfeat.convert_image(cv2.imread('test_images/lena.jpg'), feat.MODES))
     try:
         os.makedirs('out')
     except OSError:
         pass
     print('m[%s]M[%s]' % (np.min(out), np.max(out)))
     cv2.imwrite('out/lena_gradient.jpg', np.array(out * 255, dtype=np.uint8))
Example 10
 def imread(self, fn):
     out = cv2.imread(fn)
     if out is not None:
         return out
     return imfeat.convert_image(Image.open(fn), {
         'type': 'numpy',
         'dtype': 'uint8',
         'mode': 'bgr'
     })
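cv2.imread signals failure by returning None instead of raising, so the PIL branch above only runs when OpenCV cannot decode the file (for example the animated or palette GIFs mentioned in Example 6). A quick check of that behavior, with a placeholder filename for a file OpenCV cannot read:

import cv2

bad = cv2.imread('test_images/animated.gif')  # placeholder filename, an assumption
print(bad is None)                            # True when OpenCV cannot decode the file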
Example 11
def make_masks(image):
    #hog_masks = HOG.make_feature_mask(image)
    #hog_masks_shape = hog_masks.shape
    #print(hog_masks.shape)
    #hog_masks = hog_masks.reshape((hog_masks.shape[0] * hog_masks.shape[1], hog_masks.shape[2]))
    #hog_masks_min = np.min(hog_masks, 0)
    #hog_masks_max = np.max(hog_masks, 0)
    #print('HoG Min[%s] Max[%s]' % (np.min(hog_masks_min), np.max(hog_masks_max)))
    #print(hog_masks.shape)
    #hog_masks = np.array(255 * np.clip(hog_masks, 0, 1), dtype=np.uint8)
    #hog_masks,
    #LBP.make_feature_mask(image_gray,
    #pool_radius=3),
    image = imfeat.convert_image(image, [{'type': 'numpy', 'mode': 'bgr', 'dtype': 'uint8'}])
    image_gray = imfeat.convert_image(image, [{'type': 'numpy', 'mode': 'gray', 'dtype': 'uint8'}])
    image_gradient = np.array(GRAD.make_feature_mask(np.array(image_gray, dtype=np.float32) / 255.) * 255, dtype=np.uint8)
    image_lab = imfeat.convert_image(image, [{'type': 'numpy', 'mode': 'lab', 'dtype': 'uint8'}])
    return np.ascontiguousarray(np.dstack([image, image_lab, image_gradient]), dtype=np.uint8)
Example 12
def test_tb(path):
    """
    This function tests the sequence file at 'path' (on hdfs) by
    reading the images from it.
    """
    # test that we can read each file using _load_cv_image
    for (key, val) in hadoopy.readtb(path):
        print(key)
        i = imfeat.convert_image(Image.open(StringIO.StringIO(val)),
                                 [('opencv', 'gray', 8)])
Example 13
 def test_resize(self):
     to_np8 = lambda x: imfeat.convert_image(x, {'type': 'numpy', 'dtype': 'uint8', 'mode': 'bgr'})
     n = 0
     for fn in ['lena.jpg', 'lena.pgm', 'lena.ppm']:
         for i in load_images(fn):
             for h, w in [(50, 50), (100, 50), (50, 100), (1000, 100), (100, 1000)]:
                 out_arr = to_np8(imfeat.resize_image(i, h, w))
                 self.assertEqual(out_arr.shape, (h, w, 3))
                 #cv2.imwrite('resize-out-%.3d.jpg' % n, out_arr)
                 n += 1
Example 14
 def test_resize(self):
     to_np8 = lambda x: imfeat.convert_image(x, {'type': 'numpy', 'dtype': 'uint8', 'mode': 'bgr'})
     n = 0
     for fn in ['lena.jpg', 'lena.pgm', 'lena.ppm']:
         for i in load_images(fn):
             for h, w in [(50, 50), (100, 50), (50, 100), (1000, 100), (100, 1000)]:
                 out_arr = to_np8(imfeat.resize_image(i, h, w))
                 self.assertEqual(out_arr.shape, (h, w, 3))
                 #cv2.imwrite('resize-out-%.3d.jpg' % n, out_arr)
                 n += 1
Example 15
def test_tb(path):
    """
    This function tests the sequence file at 'path' (on hdfs) by
    reading the images from it.
    """
    # test that we can read each file using _load_cv_image
    for (key, val) in hadoopy.readtb(path):
        print(key)
        i = imfeat.convert_image(Image.open(StringIO.StringIO(val)),
                                [('opencv', 'gray', 8)])
Example 16
 def test_save_lena(self):
     feat = imfeat.LBP()
     for x in range(10):
         out = feat.make_feature_mask(imfeat.convert_image(cv2.imread('test_images/lena.jpg'), feat.MODES), pool_radius=x)
         print(out.shape)
         out = out.reshape(out.shape[:2])
         try:
             os.makedirs('out')
         except OSError:
             pass
         cv2.imwrite('out/lena_lbp-%d.jpg' % x, out)
Example 17
 def test_save_lena(self):
     feat = imfeat.GradientHistogram()
     out = feat.make_feature_mask(
         imfeat.convert_image(cv2.imread('test_images/lena.jpg'),
                              feat.MODES))
     try:
         os.makedirs('out')
     except OSError:
         pass
     print('m[%s]M[%s]' % (np.min(out), np.max(out)))
     cv2.imwrite('out/lena_gradient.jpg', np.array(out * 255,
                                                   dtype=np.uint8))
Example 18
 def __call__(self, image):
     image = imfeat.convert_image(image, [('opencv', 'gray', 8)])
     keypoints, descriptors = cv.ExtractSURF(image, None, cv.CreateMemStorage(), (0, 500, 3, 4))
     out = []
     for ((x, y), laplacian, size, direction, hessian) in keypoints:
         out.append({'x': x,
                     'y': y,
                     'scale': size,
                     'orientation': direction,
                     'sign': laplacian,
                     'cornerness': hessian})
     return out
Example 19
 def test_save_lena(self):
     feat = imfeat.LBP()
     for x in range(10):
         out = feat.make_feature_mask(imfeat.convert_image(
             cv2.imread('test_images/lena.jpg'), feat.MODES),
                                      pool_radius=x)
         print(out.shape)
         out = out.reshape(out.shape[:2])
         try:
             os.makedirs('out')
         except OSError:
             pass
         cv2.imwrite('out/lena_lbp-%d.jpg' % x, out)
Example 20
 def _predict(self, image):
     # Makes the max size length 320
     max_side = 320
     image = imfeat.convert_image(image, self.MODES)
     sz = np.array([image.shape[1], image.shape[0]])
     sz = np.array(max_side * sz / float(np.max(sz)), dtype=np.int)
     image = cv2.resize(image, tuple(sz))
     image = self._make_masks(image)
     # Predict using both trees
     max_classes1, max_probs1, leaves1, all_probs1 = self.tp.predict(image, leaves=True, all_probs=True)
     pred_integrals = convert_leaves_all_probs_pred(image, leaves1, all_probs1, self.tp.num_leaves)
     max_classes2, max_probs2, all_probs2 = self.tp2.predict(pred_integrals, all_probs=True)
     return max_classes1, max_probs1, leaves1, max_classes2, max_probs2, all_probs2
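As a concrete check of the resize arithmetic above (the input size is illustrative, not from the source): a 640 wide by 480 tall frame scales by 320/640, so cv2.resize receives (320, 240) in (width, height) order.

import numpy as np

max_side = 320
sz = np.array([640, 480])                                # [width, height] of a hypothetical input
sz = np.array(max_side * sz / float(np.max(sz)), dtype=int)
print(tuple(sz))                                         # (320, 240), passed to cv2.resize as dsize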
Example 21
 def _predict(self, image):
     # Makes the max size length 320
     max_side = 320
     image = imfeat.convert_image(image, self.MODES)
     sz = np.array([image.shape[1], image.shape[0]])
     sz = np.array(max_side * sz / float(np.max(sz)), dtype=np.int)
     image = cv2.resize(image, tuple(sz))
     image = self._make_masks(image)
     # Predict using both trees
     max_classes1, max_probs1, leaves1, all_probs1 = self.tp.predict(image, leaves=True, all_probs=True)
     pred_integrals = convert_leaves_all_probs_pred(image, leaves1, all_probs1, self.tp.num_leaves)
     max_classes2, max_probs2, all_probs2 = self.tp2.predict(pred_integrals, all_probs=True)
     return max_classes1, max_probs1, leaves1, max_classes2, max_probs2, all_probs2
Example 22
 def _feature_hist(self, image):
     out = []
     for block, coords in imfeat.BlockGenerator(image,
                                                imfeat.CoordGeneratorRect,
                                                output_size=(self.sbin, self.sbin),
                                                step_delta=(self.sbin, self.sbin)):
         out.append(
             imfeat.convert_image(block, {
                 'type': 'numpy',
                 'dtype': 'float32',
                 'mode': 'lab'
             }).ravel())
     return np.asfarray(out)
Example 23
 def __call__(self, image, num_components=None):
     if num_components is None:
         num_components = self.num_componenets
     image = imfeat.convert_image(image, {'type': 'numpy', 'dtype': 'uint8', 'mode': 'gray'})
     input_path = self.temp_dir + '/input.pgm'
     output_path = self.temp_dir + '/output.ppm'
     #image = Image.fromarray(image)
     #image.save(input_path)
     cv2.imwrite(input_path, image)
     cur_dir = os.path.abspath('.')
     os.chdir(self.temp_dir)
     cmd = './superpixels input.pgm output.ppm %d' % (num_components,)
     subprocess.call(cmd.split())
     out = cv2.imread(output_path)
     os.chdir(cur_dir)
     return out
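This class shells out to a 'superpixels' binary in temp_dir, writes the grayscale input as a PGM, and reads the result back as a PPM. If that PPM encodes one color per segment (an assumption about this particular binary, not something the source states), the three channels can be collapsed into integer segment ids:

import numpy as np

def labels_from_segments(seg):
    # seg: HxWx3 uint8 array as returned by cv2.imread(output_path)
    flat = seg.reshape(-1, 3).astype(np.uint32)
    keys = (flat[:, 0] << 16) | (flat[:, 1] << 8) | flat[:, 2]  # pack the channels into one key
    _, labels = np.unique(keys, return_inverse=True)
    return labels.reshape(seg.shape[:2])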
Example 24
    def test_cvhsv(self):
        """Check that HSV conversion produces correct values for a solid color
        test case
        """

        # Check that HSV produces a 'blue' hue for a blue image
        hsv_target = 120.0

        # Build the image in BGR order
        blue_image = np.tile([255, 0, 0], (10, 10, 1)).astype("u1")
        image = cv.CreateImageHeader((10, 10), cv.IPL_DEPTH_8U, 3)
        cv.SetData(image, blue_image.tostring())
        mode = ("opencv", "hsv", cv.IPL_DEPTH_8U)
        hsv = imfeat.convert_image(image, [mode])
        hsv = cvBGRtoarray(hsv)
        np.testing.assert_allclose(hsv[:, :, 2], hsv_target)
Example 25
 def test_save_lena(self):
     feat = imfeat.HOGLatent(8)
     image_input = imfeat.convert_image(
         cv2.imread('test_images/mosaic_001_01.jpg'), feat.MODES)  #lena.jpg
     num_eq = 0
     num_neq = 0
     try:
         os.makedirs('out/latent/')
     except OSError:
         pass
     for x in range(260, 261):
         #image_input = imfeat.resize_image(image_input, x, x)
         for sz in range(1, 6):
             print(sz)
             sz = 2**sz
             num_blocks = (np.floor(
                 np.asfarray(image_input.shape[:2]) / float(sz) + .5) - 2)
             print(num_blocks)
             if any(num_blocks <= 0):
                 continue
             #effective_size = (np.floor(np.asfarray(image_input.shape[:2]) / float(sz) + .5)) * sz
             feat = imfeat.HOGLatent(sz)
             im = image_input.copy()
             out = feat.make_feature_mask(im)
             print('Dims[%d]' % out.shape[2])
             for i in range(out.shape[2]):
                 out_s = out[:, :, i]
                 print(np.min(out_s))
                 print(np.max(out_s))
                 print('sz[%s]M[%s] m[%s]' %
                       (sz, np.max(out_s), np.min(out_s)))
                 out_s = np.array(
                     255 * (out_s - np.min(out_s)) /
                     (np.max(out_s) - np.min(out_s) + .000000001),
                     dtype=np.uint8)
                 cv2.imwrite('out/latent/lena-hog-%.3d-%.3d.png' % (sz, i),
                             out_s)
             y, x = np.random.randint(0, im.shape[0]), np.random.randint(
                 0, im.shape[1])
             im[y, x, :] += 100
             out2 = feat.make_feature_mask(im)
             if out[y, x, :].tolist() == out2[y, x, :].tolist():
                 num_eq += 1
                 print('-----------%s' % str((num_eq, num_neq)))
             else:
                 num_neq += 1
Example 26
 def test_patterns(self):
     feat = imfeat.LBP()
     for in_str, out_val in [('[1,1,1;1,2,1;1,1,1]', 255),
                             ('[1,1,1;1,1,1;1,1,1]', 0),
                             ('[0,1,1;1,1,1;1,1,1]', 1),
                             ('[1,0,1;1,1,1;1,1,1]', 2),
                             ('[1,1,0;1,1,1;1,1,1]', 4),
                             ('[1,1,1;1,1,0;1,1,1]', 8),
                             ('[1,1,1;1,1,1;1,1,0]', 16),
                             ('[1,1,1;1,1,1;1,0,1]', 32),
                             ('[1,1,1;1,1,1;0,1,1]', 64),
                             ('[1,1,1;0,1,1;1,1,1]', 128),
                             ('[0,1,1;0,1,1;1,1,1]', 129)]:
         data = np.array(np.mat(in_str).A, dtype=np.uint8)
         expected = np.ones((3, 3), dtype=np.uint8) * out_val
         out = feat.make_feature_mask(imfeat.convert_image(data, feat.MODES))
         np.testing.assert_equal(out, expected)
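Read together, the expected values pin down the encoding: a bit is set when the neighbor is strictly smaller than the center pixel, with weights 1, 2, 4 across the top row, then 8, 16, 32, 64 continuing clockwise, and 128 at the middle-left. A small reference sketch inferred from these test patterns (an inference, not imfeat's actual implementation, which is native code):

import numpy as np

WEIGHTS = np.array([[1,    2,  4],
                    [128,  0,  8],
                    [64,  32, 16]])

def lbp_code(patch3x3):
    patch = np.asarray(patch3x3)
    # Sum the weights of the neighbors that are strictly smaller than the center
    return int(WEIGHTS[patch < patch[1, 1]].sum())

assert lbp_code([[0, 1, 1], [0, 1, 1], [1, 1, 1]]) == 129
assert lbp_code([[1, 1, 1], [1, 2, 1], [1, 1, 1]]) == 255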
Example 27
 def test_patterns(self):
     feat = imfeat.LBP()
     for in_str, out_val in [('[1,1,1;1,2,1;1,1,1]', 255),
                             ('[1,1,1;1,1,1;1,1,1]', 0),
                             ('[0,1,1;1,1,1;1,1,1]', 1),
                             ('[1,0,1;1,1,1;1,1,1]', 2),
                             ('[1,1,0;1,1,1;1,1,1]', 4),
                             ('[1,1,1;1,1,0;1,1,1]', 8),
                             ('[1,1,1;1,1,1;1,1,0]', 16),
                             ('[1,1,1;1,1,1;1,0,1]', 32),
                             ('[1,1,1;1,1,1;0,1,1]', 64),
                             ('[1,1,1;0,1,1;1,1,1]', 128),
                             ('[0,1,1;0,1,1;1,1,1]', 129)]:
         data = np.array(np.mat(in_str).A, dtype=np.uint8)
         expected = np.ones((3, 3), dtype=np.uint8) * out_val
         out = feat.make_feature_mask(imfeat.convert_image(
             data, feat.MODES))
         np.testing.assert_equal(out, expected)
Example 28
 def __call__(self, image, num_components=None):
     if num_components is None:
         num_components = self.num_componenets
     image = imfeat.convert_image(image, {
         'type': 'numpy',
         'dtype': 'uint8',
         'mode': 'gray'
     })
     input_path = self.temp_dir + '/input.pgm'
     output_path = self.temp_dir + '/output.ppm'
     #image = Image.fromarray(image)
     #image.save(input_path)
     cv2.imwrite(input_path, image)
     cur_dir = os.path.abspath('.')
     os.chdir(self.temp_dir)
     cmd = './superpixels input.pgm output.ppm %d' % (num_components, )
     subprocess.call(cmd.split())
     out = cv2.imread(output_path)
     os.chdir(cur_dir)
     return out
Example 29
 def gen():
     try:
         frame_num = -1
         skip_next = None
         while True:
             frame = _read_ppm(proc.stdout)
             frame_num += 1
             if frame is None:
                 break
             if skip_next is not None:
                 if frame_num < skip_next: continue
             else:
                 if frame_num % mod != 0: continue
             skip_next = yield(frame_num,
                               frame_num / fps,
                               imfeat.convert_image(frame, image_modes))
     finally:
         # Kill the ffmpeg process early if the generator is destroyed
         proc.kill()
         proc.wait()
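The skip_next = yield (...) line makes this a two-way generator: the consumer can call gen.send(n) to ask the producer to skip ahead to frame n instead of stepping by mod. A self-contained toy with the same control flow (no ffmpeg involved):

def toy_frames(mod=1, last=20):
    frame_num = -1
    skip_next = None
    while frame_num < last:
        frame_num += 1
        if skip_next is not None:
            if frame_num < skip_next:
                continue
        elif frame_num % mod != 0:
            continue
        skip_next = yield frame_num

g = toy_frames(mod=2)
print(next(g))     # 0
print(g.send(7))   # skips frames 1-6 and yields 7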
Example 30
def frame_iter(stream, image_modes, mod=1):
    SEEK_START_ATTEMPTS = 3
    # Use seek to find the first good frame
    for i in range(SEEK_START_ATTEMPTS):
        try:
            stream.tv.seek_to_frame(i)
        except IOError:
            continue
        else:
            break
    fps = stream.tv.get_fps()
    cnt = 0
    while 1:
        if cnt % mod == 0:
            _, num, frame = stream.tv.get_current_frame()[:3]
            yield num, num / fps, imfeat.convert_image(frame, image_modes)
        try:
            stream.tv.get_next_frame()
        except IOError:
            break
        cnt += 1
Example 31
 def test_save_lena(self):
     feat = imfeat.HOGLatent(8)
     image_input = imfeat.convert_image(cv2.imread('test_images/mosaic_001_01.jpg'), feat.MODES)#lena.jpg
     num_eq = 0
     num_neq = 0
     try:
         os.makedirs('out/latent/')
     except OSError:
         pass
     for x in range(260, 261):
         #image_input = imfeat.resize_image(image_input, x, x)
         for sz in range(1, 6):
             print(sz)
             sz = 2 ** sz
             num_blocks = (np.floor(np.asfarray(image_input.shape[:2]) / float(sz) + .5) - 2)
             print(num_blocks)
             if any(num_blocks <= 0):
                 continue
             #effective_size = (np.floor(np.asfarray(image_input.shape[:2]) / float(sz) + .5)) * sz
             feat = imfeat.HOGLatent(sz)
             im = image_input.copy()
             out = feat.make_feature_mask(im)
             print('Dims[%d]' % out.shape[2])
             for i in range(out.shape[2]):
                 out_s = out[:, :, i]
                 print(np.min(out_s))
                 print(np.max(out_s))
                 print('sz[%s]M[%s] m[%s]' % (sz, np.max(out_s), np.min(out_s)))
                 out_s = np.array(255 * (out_s - np.min(out_s)) / (np.max(out_s) - np.min(out_s) + .000000001), dtype=np.uint8)
                 cv2.imwrite('out/latent/lena-hog-%.3d-%.3d.png' % (sz, i), out_s)
             y, x = np.random.randint(0, im.shape[0]), np.random.randint(0, im.shape[1])
             im[y, x, :] += 100
             out2 = feat.make_feature_mask(im)
             if out[y, x, :].tolist() == out2[y, x, :].tolist():
                 num_eq += 1
                 print('-----------%s' % str((num_eq, num_neq)))
             else:
                 num_neq += 1
Example 32
def pil_to_cv(fp):
    return imfeat.convert_image(Image.open(fp), [('opencv', 'bgr', 8)])
Example 33
 def _load_cv_image(self, value):
     image = Image.open(StringIO.StringIO(value))
     image = image.convert('RGB')
     return image, imfeat.convert_image(image, [('opencv', 'rgb', 8)])
Example 34
 def _make_masks(self, image):
     image = imfeat.convert_image(image, [{'type': 'numpy', 'mode': 'bgr', 'dtype': 'uint8'}])
     image_gray = imfeat.convert_image(image, [{'type': 'numpy', 'mode': 'gray', 'dtype': 'uint8'}])
     image_gradient = np.array(self.grad.make_feature_mask(np.array(image_gray, dtype=np.float32) / 255.) * 255, dtype=np.uint8)
     image_lab = imfeat.convert_image(image, [{'type': 'numpy', 'mode': 'lab', 'dtype': 'uint8'}])
     return np.ascontiguousarray(np.dstack([image, image_lab, image_gradient]), dtype=np.uint8)
Example 35
 def feat_func(self, frame):
     gray_frame = imfeat.convert_image(frame, [('opencv', 'gray', 8)])
     return {
         'surf': self._surf(gray_frame),
         'hist': self._hist_feat_func(gray_frame)
     }
Example 36
 def _feature(self, image):
     out = []
     for block, coords in imfeat.BlockGenerator(image, imfeat.CoordGeneratorRect, output_size=(self.sbin, self.sbin), step_delta=(self.sbin, self.sbin)):
         out.append(imfeat.convert_image(block, {'type': 'numpy', 'dtype': 'float32', 'mode': self.mode}).ravel())
     return np.asfarray(out)
Example 37
 def _load_cv_image(self, value):
     return imfeat.convert_image(Image.open(StringIO.StringIO(value)),
                                 [('opencv', 'rgb', 8)])
Example 38
 def _make_masks(self, image):
     image = imfeat.convert_image(image, [{'type': 'numpy', 'mode': 'bgr', 'dtype': 'uint8'}])
     image_gray = imfeat.convert_image(image, [{'type': 'numpy', 'mode': 'gray', 'dtype': 'uint8'}])
     image_gradient = np.array(self.grad.make_feature_mask(np.array(image_gray, dtype=np.float32) / 255.) * 255, dtype=np.uint8)
     image_lab = imfeat.convert_image(image, [{'type': 'numpy', 'mode': 'lab', 'dtype': 'uint8'}])
     return np.ascontiguousarray(np.dstack([image, image_lab, image_gradient]), dtype=np.uint8)
Example 39
def test_tb(path):
    # test that we can read each file using _load_cv_image
    for (key, val) in hadoopy.readtb(path):
        print(key)
        i = imfeat.convert_image(Image.open(StringIO.StringIO(val)),
                                [('opencv', 'gray', 8)])
Example 40
def cv_to_jpg(img):
    fp = StringIO.StringIO()
    imfeat.convert_image(img, ['RGB']).save(fp, 'JPEG')
    fp.seek(0)
    return fp.read()
Example 41
    def map(self, event_filename, video_data):
        """

        Args:
            event_filename: Tuple of (event, filename)
            video_data: Binary video data

        Yields:
            A tuple in the form of ((event, filename), features) where features is a dict

            frame_features: List of frame features
            file_size: Size in bytes

            where each frame feature is a dictionary of

            frame_time: Time in seconds
            frame_num: Frame number
            prev_frame_num: Previous frame number (useful if there is a frame skip)
            keyframe: Boolean True/False
            surf: List of surf points (see impoint)
            face_widths:
            face_heights:
            predictions: Dictionary of predictions
        """
        sys.stderr.write('In Raw:%s\n' % str(event_filename))
        print(event_filename)
        ext = '.' + event_filename[1].rsplit('.', 1)[1]
        with tempfile.NamedTemporaryFile(suffix=ext) as fp:
            with self.timer('Writing video data'):
                fp.write(video_data)
                fp.flush()
            kf = keyframe.DecisionTree(min_interval=0)
            kf.load()
            prev_frame = None
            prev_frame_num = 0
            all_out = []
            sz = len(video_data)

            self.timer.start('KF')
            try:
                for (frame_num, frame_time, frame), iskeyframe in kf(viderator.frame_iter(fp.name,
                                                                                          frozen=True)):
                    hadoopy.counter('RawFeatures', 'NumFrames')
                    self.timer.stop('KF')
                    print(frame_time)
                    if frame_num > self._max_frames:
                        break
                    if frame_num % 100 == 0:
                        with self.timer('Computing face features'):
                            faces = _detect_faces(imfeat.convert_image(frame, [('opencv', 'gray', 8)]),
                                                  self.cascade)
                    else:
                        faces = {}
                    out = {'frame_time': frame_time, 'frame_num': frame_num,
                           'prev_frame_num': prev_frame_num, 'keyframe': iskeyframe,
                           'surf': kf.prev_vec['surf']}
                    if faces:  # If any faces
                        face_heights = np.array([x[0][3] for x in faces]) / float(frame.height)
                        face_widths = np.array([x[0][2] for x in faces]) / float(frame.width)
                        out['face_widths'] = face_widths
                        out['face_heights'] = face_heights
                    # Output the cur and previous frames if this is a keyframe
                    if iskeyframe and np.random.random() < self._frame_output_prob:
                        out['prev_frame'] = cv_to_jpg(prev_frame)
                        out['frame'] = cv_to_jpg(frame)
                    # Compute scene features
                    with self.timer('Computing scene classifier features'):
                        frame_res = cv.fromarray(cv2.resize(np.asarray(cv.GetMat(frame)), (self._image_width, self._image_height)))
                        feature = self._feat(frame_res)
                        out['predictions'] = dict((classifier_name, classifier.predict(feature))
                                                  for classifier_name, classifier in self._classifiers)
                    # Output JPEG with match lines from the SURF feature
                    if np.random.random() < self._match_line_prob and prev_frame:
                        out['surf_image'] = cv_to_jpg(plot_matches(prev_frame, kf.surf_debug['matches'], kf.surf_debug['points0'],
                                                                   kf.surf_debug['points1'], max_feat_width=kf.max_feat_width))
                    # Output data buffer
                    all_out.append(out)
                    if len(all_out) >= self._block_size:
                        with self.timer('Yield'):
                            yield event_filename, {'frame_features': all_out,
                                                   'file_size': sz}
                            all_out = []
                    prev_frame = frame
                    prev_frame_num = frame_num
                self.timer.start('KF')
            except viderator.FPSParseException:  # NOTE(brandyn): This will disregard videos with this error
                hadoopy.counter('SkippedVideos', 'FPSParseException')
                return
            if all_out:
                with self.timer('Yield'):
                    yield event_filename, {'frame_features': all_out,
                                           'file_size': sz}
Example 42
 def __call__(self, image):
     image_cv = imfeat.convert_image(image, self.MODES)
     return self.make_features(image_cv)[0]
Example 43
 def imread(self, fn):
     out = cv2.imread(fn)
     if out is not None:
         return out
     return imfeat.convert_image(Image.open(fn), {'type': 'numpy', 'dtype': 'uint8', 'mode': 'bgr'})
Example 44
 def __init__(self, images=None, vectors=None, verbose=False):
     self.MODES = [('opencv', 'gray', 32)]
     self.verbose = verbose
     if images is not None:
         images = [imfeat.convert_image(i, self.MODES) for i in images]
         self.train(images, vectors)
Example 45
 def __call__(self, image):
     image_cv = imfeat.convert_image(image, self.MODES)
     return self.make_features(image_cv)[0]
Example 46
 def __init__(self, images=None, vectors=None, verbose=False):
     self.MODES = [('opencv', 'gray', 32)]
     self.verbose = verbose
     if images is not None:
         images = [imfeat.convert_image(i, self.MODES) for i in images]
         self.train(images, vectors)
Example 47
    def map(self, event_filename, video_data):
        """

        Args:
            event_filename: Tuple of (event, filename)
            video_data: Binary video data

        Yields:
            A tuple in the form of ((event, filename), features) where features is a dict

            frame_features: List of frame features
            file_size: Size in bytes

            where each frame feature is a dictionary of

            frame_time: Time in seconds
            frame_num: Frame number
            prev_frame_num: Previous frame number (useful if there is a frame skip)
            keyframe: Boolean True/False
            surf: List of surf points (see impoint)
            face_widths:
            face_heights:
            predictions: Dictionary of predictions
        """
        sys.stderr.write('In Raw:%s\n' % str(event_filename))
        print(event_filename)
        ext = '.' + event_filename[1].rsplit('.', 1)[1]
        with tempfile.NamedTemporaryFile(suffix=ext) as fp:
            with self.timer('Writing video data'):
                fp.write(video_data)
                fp.flush()
            kf = keyframe.DecisionTree(min_interval=0)
            kf.load()
            prev_frame = None
            prev_frame_num = 0
            all_out = []
            sz = len(video_data)

            self.timer.start('KF')
            try:
                for (frame_num, frame_time, frame), iskeyframe in kf(
                        viderator.frame_iter(fp.name, frozen=True)):
                    hadoopy.counter('RawFeatures', 'NumFrames')
                    self.timer.stop('KF')
                    print(frame_time)
                    if frame_num > self._max_frames:
                        break
                    if frame_num % 100 == 0:
                        with self.timer('Computing face features'):
                            faces = _detect_faces(
                                imfeat.convert_image(frame,
                                                     [('opencv', 'gray', 8)]),
                                self.cascade)
                    else:
                        faces = {}
                    out = {
                        'frame_time': frame_time,
                        'frame_num': frame_num,
                        'prev_frame_num': prev_frame_num,
                        'keyframe': iskeyframe,
                        'surf': kf.prev_vec['surf']
                    }
                    if faces:  # If any faces
                        face_heights = np.array([x[0][3] for x in faces
                                                 ]) / float(frame.height)
                        face_widths = np.array([x[0][2] for x in faces
                                                ]) / float(frame.width)
                        out['face_widths'] = face_widths
                        out['face_heights'] = face_heights
                    # Output the cur and previous frames if this is a keyframe
                    if iskeyframe and np.random.random(
                    ) < self._frame_output_prob:
                        out['prev_frame'] = cv_to_jpg(prev_frame)
                        out['frame'] = cv_to_jpg(frame)
                    # Compute scene features
                    with self.timer('Computing scene classifier features'):
                        frame_res = cv.fromarray(
                            cv2.resize(
                                np.asarray(cv.GetMat(frame)),
                                (self._image_width, self._image_height)))
                        feature = self._feat(frame_res)
                        out['predictions'] = dict(
                            (classifier_name, classifier.predict(feature)) for
                            classifier_name, classifier in self._classifiers)
                    # Output JPEG with match lines from the SURF feature
                    if np.random.random(
                    ) < self._match_line_prob and prev_frame:
                        out['surf_image'] = cv_to_jpg(
                            plot_matches(prev_frame,
                                         kf.surf_debug['matches'],
                                         kf.surf_debug['points0'],
                                         kf.surf_debug['points1'],
                                         max_feat_width=kf.max_feat_width))
                    # Output data buffer
                    all_out.append(out)
                    if len(all_out) >= self._block_size:
                        with self.timer('Yield'):
                            yield event_filename, {
                                'frame_features': all_out,
                                'file_size': sz
                            }
                            all_out = []
                    prev_frame = frame
                    prev_frame_num = frame_num
                self.timer.start('KF')
            except viderator.FPSParseException:  # NOTE(brandyn): This will disregard videos with this error
                hadoopy.counter('SkippedVideos', 'FPSParseException')
                return
            if all_out:
                with self.timer('Yield'):
                    yield event_filename, {
                        'frame_features': all_out,
                        'file_size': sz
                    }
Example 48
 def _load_cv_image(self, value):
     image = Image.open(StringIO.StringIO(value))
     image = image.convert('RGB')
     return image, imfeat.convert_image(image, [('opencv', 'rgb', 8)])
Example 49
 def _load_cv_image(self, value):
     return imfeat.convert_image(Image.open(StringIO.StringIO(value)),
                                 [('opencv', 'bgr', 8)])
Example 50
def cv_to_jpg(img):
    fp = StringIO.StringIO()
    imfeat.convert_image(img, ['RGB']).save(fp, 'JPEG')
    fp.seek(0)
    return fp.read()
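Passing the bare mode list ['RGB'] yields a PIL image, which is why .save can be called on the result. A minimal round trip, assuming the old cv bindings and the lena test image used elsewhere in these examples:

import cv  # old OpenCV bindings, as used above

jpeg_bytes = cv_to_jpg(cv.LoadImage('test_images/lena.jpg'))
print(len(jpeg_bytes))   # size of the JPEG-encoded frame in bytes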
Example 51
def convert_color(image):
    image = imfeat.convert_image(image, [('opencv', 'lab', cv.IPL_DEPTH_8U)])
    image = np.asarray(cv.GetMat(image)).copy()
    return image
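This is the old-cv route to a LAB numpy array; the dict-style spec used in the make_masks examples requests the same thing directly. A one-line equivalent sketch, assuming a BGR uint8 input loaded from the test set:

import cv2
import imfeat

image = cv2.imread('test_images/lena.jpg')
image_lab = imfeat.convert_image(image, [{'type': 'numpy', 'mode': 'lab', 'dtype': 'uint8'}])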
Example 52
 def feat_func(self, frame):
     gray_frame = imfeat.convert_image(frame, [('opencv', 'gray', 8)])
     return {'surf': self._surf(gray_frame), 'hist': self._hist_feat_func(gray_frame)}